| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
#coding=utf8
import os
import numpy as np
import pickle
# from lib.net.point_rcnn import PointRCNN
# from lib.datasets.mada_rcnn_dataset import MadaRCNNDataset
# import tools.train_utils.train_utils as train_utils
# from lib.utils.bbox_transform import decode_bbox_target
# from tools.kitti_object_eval_python.visualize_common import VisualizePcd, quaternion_from_euler
# from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
# import lib.utils.kitti_utils as kitti_utils
# import lib.utils.iou3d.iou3d_utils as iou3d_utils
import logging
import math
import re
import glob
import time
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2 as pc2
from std_msgs.msg import Header
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
from jsk_rviz_plugins.msg import Pictogram,PictogramArray
import sys
from pynput.keyboard import Controller, Key, Listener
from pynput import keyboard
import json
# import struct
FIXED_FRAME = 'pandar'
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# inverse mapping, needed by the tuple-input fallback in quaternion_from_euler
_TUPLE2AXES = dict((value, key) for key, value in _AXES2TUPLE.items())
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# code from /opt/ros/kinetic/lib/python2.7/dist-packages/tf/transformations.py
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
# print("ak : {}".format(type(ak)))
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
quaternion = np.empty((4, ), dtype=np.float64)
if repetition:
quaternion[i] = cj*(cs + sc)
quaternion[j] = sj*(cc + ss)
quaternion[k] = sj*(cs - sc)
quaternion[3] = cj*(cc - ss)
else:
quaternion[i] = cj*sc - sj*cs
quaternion[j] = cj*ss + sj*cc
quaternion[k] = cj*cs - sj*sc
quaternion[3] = cj*cc + sj*ss
if parity:
quaternion[j] *= -1
return quaternion
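# Hypothetical usage sketch (not part of the original script; field names are
# illustrative): an upright detection box only needs a yaw rotation, so with the
# default 'sxyz' axes the call reduces to a rotation about z. The returned order
# is (x, y, z, w), matching a ROS geometry_msgs orientation.
# q = quaternion_from_euler(0.0, 0.0, yaw)
# box.pose.orientation.x, box.pose.orientation.y = q[0], q[1]
# box.pose.orientation.z, box.pose.orientation.w = q[2], q[3]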
# /velodyne_points topic's subscriber callback function
# publishing function for DEBUG
def publish_test(np_p_ranged, frame_id):
header = Header()
header.stamp = rospy.Time()
header.frame_id = frame_id
x = np_p_ranged[:, 0].reshape(-1)
y = np_p_ranged[:, 1].reshape(-1)
z = np_p_ranged[:, 2].reshape(-1)
# if intensity field exists
if np_p_ranged.shape[1] == 4:
i = np_p_ranged[:, 3].reshape(-1)
else:
i = np.zeros((np_p_ranged.shape[0], 1)).reshape(-1)
cloud = np.stack((x, y, z, i))
# point cloud segments
# 4 PointFields as channel description
msg_segment = pc2.create_cloud(header=header,
fields=_make_point_field(4),
points=cloud.T)
# publish to /velodyne_points_modified
point_pub.publish(msg_segment) # DEBUG
# code from SqueezeSeg (inspired from Durant35)
def hv_in_range(x, y, z, fov, fov_type='h'):
"""
Extract filtered in-range velodyne coordinates based on azimuth & elevation angle limit
Args:
`x`:velodyne points x array
`y`:velodyne points y array
`z`:velodyne points z array
`fov`: a two-element list, e.g. [-45, 45]
`fov_type`: the fov type, either `h` or `v`, default is `h`
Return:
`cond`:condition of points within fov or not
Raise:
`NameError`:"fov type must be set between 'h' and 'v' "
"""
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if fov_type == 'h':
return np.logical_and(np.arctan2(y, x) > (-fov[1] * np.pi/180), np.arctan2(y, x) < (-fov[0] * np.pi/180))
elif fov_type == 'v':
return np.logical_and(np.arctan2(z, d) < (fov[1] * np.pi / 180), np.arctan2(z, d) > (fov[0] * np.pi / 180))
else:
raise NameError("fov type must be set between 'h' and 'v' ")
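# Hypothetical usage sketch (the point-cloud array `pc` is an assumption): keep
# only points inside a 90-degree horizontal field of view in front of the sensor.
# cond = hv_in_range(pc[:, 0], pc[:, 1], pc[:, 2], fov=[-45, 45], fov_type='h')
# pc_front = pc[cond]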
def _make_point_field(num_field):
msg_pf1 = pc2.PointField()
msg_pf1.name = np.str('x')
msg_pf1.offset = np.uint32(0)
msg_pf1.datatype = np.uint8(7)
msg_pf1.count = np.uint32(1)
msg_pf2 = pc2.PointField()
msg_pf2.name = np.str('y')
msg_pf2.offset = np.uint32(4)
msg_pf2.datatype = np.uint8(7)
msg_pf2.count = np.uint32(1)
msg_pf3 = pc2.PointField()
msg_pf3.name = np.str('z')
msg_pf3.offset = np.uint32(8)
msg_pf3.datatype = np.uint8(7)
msg_pf3.count = np.uint32(1)
msg_pf4 = pc2.PointField()
msg_pf4.name = np.str('intensity')
msg_pf4.offset = np.uint32(16)
msg_pf4.datatype = np.uint8(7)
msg_pf4.count = np.uint32(1)
if num_field == 4:
return [msg_pf1, msg_pf2, msg_pf3, msg_pf4]
msg_pf5 = pc2.PointField()
msg_pf5.name = np.str('label')
import numpy as np
import os
import matplotlib.pyplot as plt
from PIL import Image
from ncpsort.utils.clustering import get_topn_clusters
DEFAULT_COLORS = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C8', 'C9', 'C7',
'black', 'blue', 'red', 'green', 'magenta', 'brown', 'orange']
def plot_raw_spikes_in_rows(waveforms, assignments, spacing=1, width=1, vscale=1,
subplot_adj=0.9, colors=DEFAULT_COLORS, figtitle="",
figdir="./", fname_postfix="", show=True):
"""Plot raw spikes, each spike in a separate row and each channel in a separate column
Args:
waveforms: a numpy array of shape (n_samples, n_timesteps, n_channels)
assignments: a numpy array of shape (n_samples,)
"""
waveforms = waveforms.transpose((0, 2, 1)) # [N, n_chs, n_times]
n_samples = waveforms.shape[0]
n_chs = waveforms.shape[1]
n_unit = len(set(assignments))
waveforms_plot = waveforms * vscale - \
np.reshape(np.arange(n_samples), (-1, 1, 1)) * spacing * 8
fig_height = 1
fig, axes = plt.subplots(1, n_chs, figsize=(
width * n_chs, 2 + (n_samples - 1) * spacing / 4), sharey=True)
fontsize = 15
# plt.ylim(np.percentile(waveforms, 0.1), np.percentile(waveforms, 99.9))
plt.ylim(np.min(waveforms_plot) - 2, np.max(waveforms_plot) + 2)
units = np.unique(assignments[assignments != -1])
for chid in range(n_chs):
for unit in units:
axes[chid].plot(waveforms_plot[assignments == unit, chid, :].T,
color=DEFAULT_COLORS[unit % len(DEFAULT_COLORS)], alpha=0.8, label="unit {}".format(unit))
if np.sum(assignments == -1):
import numpy as np
import scipy as sp
import scipy.spatial
import scipy.signal
import numexpr as ne
import warnings
import os
from ...misc.basic_functions import rowsum, differentiation_matrix, differentiate
from ...kernels.high_level.laplace import Laplace_Layer_Form, Laplace_Layer_Apply
from ...kernels.high_level.cauchy import Cauchy_Layer_Form, Cauchy_Layer_Apply
class Laplace_Close_Quad(object):
"""
Module providing Laplace Close Eval based on Globally Compensated Cauchy Quad
"""
def __init__(self, GSB):
"""
Initializes the Close Quad Module
GSB (required): boundary of type Global_Smooth_Boundary
"""
self.boundary = GSB
GSB.add_module('Laplace_CSLP_Self_Kress')
inside_point = self.boundary.get_inside_point()
complex_weights = self.boundary.complex_weights
c = self.boundary.c
t = self.boundary.t
N = self.boundary.N
self.sawlog = -1j*t + np.log(inside_point - c)
self.sawlog.imag = np.unwrap(self.sawlog.imag)
self.inf_scale = complex_weights/(c-inside_point) / (2.0j*np.pi)
def get_differentiation_matrix(self):
if not hasattr(self, 'differentiation_matrix'):
self.differentiation_matrix = differentiation_matrix(self.boundary.N)
return self.differentiation_matrix
def Form(self, target, side, do_DLP=False,
do_SLP=False, gradient=False, main_type='real', gradient_type='real', forstokes=False):
return Compensated_Laplace_Form(self.boundary, target, side, do_DLP,
do_SLP, gradient, main_type, gradient_type, forstokes)
def Apply(self, target, side, tau, do_DLP=False,
do_SLP=False, gradient=False, main_type='real', gradient_type='real', backend='fly', forstokes=False):
return Compensated_Laplace_Apply(self.boundary, target, side, tau, do_DLP,
do_SLP, gradient, main_type, gradient_type, backend, forstokes)
def Get_Close_Corrector(self, target, side, do_DLP=False, do_SLP=False, backend='fly'):
return Laplace_Close_Corrector(self.boundary, target, side, do_DLP, do_SLP, backend)
class Laplace_Close_Corrector(object):
def __init__(self, source, target, side, do_DLP, do_SLP, backend):
self.source = source
self.target = target
self.side = side
self.do_DLP = do_DLP
self.do_SLP = do_SLP
self.backend = backend
self.preformed = self.backend == 'preformed'
self.prepare = self._prepare_formed if self.preformed else self._prepare_apply
self.call_func = self._call_formed if self.preformed else self._call_apply
self.prepare()
def __call__(self, *args, **kwargs):
self.call_func(*args, **kwargs)
def _prepare_formed(self):
close_mat = Compensated_Laplace_Form(self.source, self.target, self.side, self.do_DLP, self.do_SLP)
naive_mat = Laplace_Layer_Form(self.source, self.target, ifcharge=self.do_SLP, ifdipole=self.do_DLP)
self.correction_mat = close_mat.real - naive_mat
def _prepare_apply(self):
pass
def _call_formed(self, u, tau, close_pts):
u[close_pts] += self.correction_mat.dot(tau)
def _call_apply(self, u, tau, close_pts):
v1 = Compensated_Laplace_Apply(self.source, self.target, self.side, tau, do_DLP=self.do_DLP, do_SLP=self.do_SLP, backend=self.backend)
ch = tau if self.do_SLP else None
ds = tau if self.do_DLP else None
v2 = Laplace_Layer_Apply(self.source, self.target, charge=ch, dipstr=ds, backend=self.backend)
u[close_pts] += (v1.real - v2)
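# Hypothetical usage sketch (the `boundary`, `targets`, `u`, `tau` and `close_pts`
# objects are assumptions, not defined in this file): build a corrector once and
# use it to patch a naive layer-potential evaluation at the near-boundary targets.
# corrector = boundary.Laplace_Close_Quad.Get_Close_Corrector(targets, side='i', do_DLP=True, backend='preformed')
# corrector(u, tau, close_pts)   # adds (close-eval - naive) correction to u in place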
def Compensated_Laplace_Form(source, target, side, do_DLP=False,
do_SLP=False, gradient=False, main_type='real', gradient_type='real', forstokes=False):
"""
Full Formation of Close-Eval Matrix for Laplace Problem
Parameters:
source (required): Boundary, source
target (required): PointSet, target
side (required): 'i' or 'e' for interior/exterior evaluation
do_DLP (optional): whether to include DLP evaluation
do_SLP (optional): whether to include SLP evaluation
gradient (optional): compute gradient matrices or not
main_type (optional): if 'real', return only real part of main matrix,
otherwise return both real and complex parts
gradient_type (optional): if 'real', return two real matrices for u_x and u_y,
otherwise return one complex matrix with the real
part giving evaluation of u_x and the imaginary
part giving evaluation of u_y
Returns:
if not gradient and main_type =='real':
MAT; real matrix such that u = MAT.dot(tau)
if not gradient and main_type == 'complex':
MAT; complex matrix such that u = MAT.real.dot(tau)
the complex part of MAT is used in Stokes evaluations
if gradient and gradient_type == 'real':
(MAT, DX_MAT, DY_MAT), tuple of matrices
MAT as described above
DX_MAT real matrix such that u_x = DX_MAT.dot(tau)
DY_MAT real matrix such that u_y = DY_MAT.dot(tau)
if gradient and gradient_type == 'complex':
(MAT, DMAT), tuple of matrices
MAT as described above
DMAT complex matrix such that:
u_x = DMAT.real.dot(tau)
u_y = -DMAT.imag.dot(tau)
"""
N = source.N
M = target.N
PM = np.zeros([N, N], dtype=complex)
if do_DLP:
PM += compensated_laplace_dlp_preform(source, side)
if do_SLP:
SPM, AFTER_MATS = compensated_laplace_slp_preform(source, target, side,
gradient=gradient)
if gradient:
AFTER_MAT = AFTER_MATS[0]
AFTER_DER_MAT = AFTER_MATS[1]
else:
AFTER_MAT = AFTER_MATS
if forstokes:
SPM *= 0.5
AFTER_MAT *= 0.5
if gradient:
AFTER_DER_MAT *= 0.5
PM += SPM
cauchy_mats = compensated_cauchy_form(source, target, side,
derivative=gradient)
if gradient:
cauchy_mat = cauchy_mats[0]
der_cauchy_mat = cauchy_mats[1]
else:
cauchy_mat = cauchy_mats
MAT1 = cauchy_mat.dot(PM)
if gradient:
der_cauchy_mat = cauchy_mats[1]
MATD = der_cauchy_mat.dot(PM)
if do_SLP:
MAT1 += AFTER_MAT
if gradient:
MATD += AFTER_DER_MAT
MAT = MAT1.real if main_type == 'real' else MAT1
if gradient:
if gradient_type == 'real':
ret = (MAT, MATD.real, -MATD.imag)
else:
ret = (MAT, MATD)
else:
ret = MAT
return ret
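# Hypothetical usage sketch (the `boundary`, `targets` and density `tau` are
# assumptions): form the interior close-evaluation matrix for a double-layer
# density and apply it; with the default main_type='real' the result is a real
# matrix of shape (target.N, source.N).
# MAT = Compensated_Laplace_Form(boundary, targets, side='i', do_DLP=True)
# u = MAT.dot(tau)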
def compensated_cauchy_form(source, target, side, derivative=False):
sc = source.c
scT = sc[:,None]
tcT = target.c[:,None]
cw = source.complex_weights
cwT = cw[:,None]
comp = Cauchy_Layer_Form(source, target)
J0 = rowsum(comp)
if side == 'e':
J0 += 1.0
prefac = 1.0/J0
prefacT = prefac[:,None]
MAT = ne.evaluate('prefacT*comp')
if derivative:
# get Schneider-Werner derivative matrix
DMAT = 2.0j*np.pi*Cauchy_Layer_Form(source, source)
np.fill_diagonal(DMAT, 0.0)
np.fill_diagonal(DMAT, -rowsum(DMAT))
if side == 'e':
np.fill_diagonal(DMAT, DMAT.diagonal() - 2.0j*np.pi)
ne.evaluate('DMAT/cwT', out=DMAT)
ret = MAT, MAT.dot(DMAT)
else:
ret = MAT
return ret
def compensated_laplace_dlp_preform(source, side):
method = source.Laplace_Close_Quad
A1 = Cauchy_Layer_Form(source, source)
np.fill_diagonal(A1, -rowsum(A1))
scale = 1.0j/source.N
MAT = A1 + scale*method.get_differentiation_matrix()
if side == 'i':
np.fill_diagonal(MAT, MAT.diagonal()-1)
return MAT
def compensated_laplace_slp_preform(source, target, side, gradient=False):
cslp_method = source.Laplace_CSLP_Self_Kress
method = source.Laplace_Close_Quad
target_difference = source.get_inside_point() - target.c
# check if the CSLP Matrix was already generated
CSLP = cslp_method.Form(side).copy()
if side == 'e':
# what gets done before cauchy
MAT1 = CSLP + method.sawlog[:,None]*(source.weights/(2.0*np.pi))
MAT2 = method.inf_scale.dot(MAT1)[:,None]
MAT = MAT1 - MAT2.T
# what gets done after cauchy
LA = np.log(np.abs(target_difference))
import numpy as np
import numpy
####################################################################
def paint(pos, mesh, weights=1.0, mode="raise", period=None, transform=None):
""" CIC approximation (trilinear), painting points to Nmesh,
each point has a weight given by weights.
This does not give density.
pos is supposed to be row vectors. aka for 3d input
pos.shape is (?, 3).
pos[:, i] should have been normalized in the range of [ 0, mesh.shape[i] )
thus z is the fast moving index
mode can be :
"raise" : raise exceptions if a particle is painted
outside the mesh
"ignore": ignore particle contribution outside of the mesh
period can be a scalar or of length len(mesh.shape). if period is given
the particles are wrapped by the period.
transform is a function that transforms pos to mesh units:
transform(pos[:, 3]) -> meshpos[:, 3]
"""
pos = numpy.array(pos)
chunksize = 1024 * 16 * 4
Ndim = pos.shape[-1]
Np = pos.shape[0]
if transform is None:
transform = lambda x:x
neighbours = ((numpy.arange(2 ** Ndim)[:, None] >> \
numpy.arange(Ndim)[None, :]) & 1)
for start in range(0, Np, chunksize):
chunk = slice(start, start+chunksize)
if numpy.isscalar(weights):
wchunk = weights
else:
wchunk = weights[chunk]
gridpos = transform(pos[chunk])
rmi_mode = 'raise'
intpos = numpy.intp(numpy.floor(gridpos))
for i, neighbour in enumerate(neighbours):
neighbour = neighbour[None, :]
targetpos = intpos + neighbour
kernel = (1.0 - numpy.abs(gridpos - targetpos)).prod(axis=-1)
add = wchunk * kernel
if period is not None:
period = numpy.int32(period)
numpy.remainder(targetpos, period, targetpos)
if len(targetpos) > 0:
targetindex = numpy.ravel_multi_index(
targetpos.T, mesh.shape, mode=rmi_mode)
u, label = numpy.unique(targetindex, return_inverse=True)
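# Hypothetical usage sketch (shapes and values are assumptions, not from this
# file): paint particles of unit weight onto a periodic 64^3 mesh whose positions
# are already in mesh units; with wrapping, CIC conserves the total painted weight.
# mesh = numpy.zeros((64, 64, 64))
# pos = numpy.random.uniform(0, 64, size=(10000, 3))
# paint(pos, mesh, weights=1.0, period=64)
# mesh.sum() is then ~10000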
# -*- coding: utf-8 -*-
"""
Created on 29/03/2020
@author: Mahdad
THIS IS THE CLASS FOR "MACHINE LEARNING & DEPRESSION PROJECT."
The class is capable of extracting relevant features, applying various machine-
learning algorithms and finally applying Randomized grid search to tune hyper-
parameters of different classifiers.
After each method of the class, there is a short description, introducing the
relevant input/outputs.
"""
#%% Importing libs
import numpy as np
import pandas as pd
import pywt
from scipy.signal import butter, lfilter, periodogram, spectrogram, welch
from sklearn.ensemble import RandomForestClassifier
import heapq
from scipy.signal import argrelextrema
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.stats import kurtosis, skew
from entropy.entropy import spectral_entropy
from scipy.fftpack import fft
import h5py
import time
class ML_Depression():
def __init__(self, filename, channel, fs, T):
self.filename = filename
self.channel = channel
self.fs = fs
self.T = T
def FeatureExtraction(self):
''' ~~~~~~################## INSTRUCTION #################~~~~~~~~
----
THIS IS A FUNCTION TO EXTRACT FEATURES AND THEN USE THEM FOR ANY KIND OF
SUPERVISED MACHINE LEARNING ALGORITHM.
INPUTS:
1) filename : full directory of train-test split (e.g. .h5 file saved via Prepare_for_CNN.py)
2) channel : channel of interest, e.g. 'fp2-M1'
OUTPUTS:
1) X : Concatenation of all feature sets after random permutation.
2) y : Relevant labels of "X".
'''
# Loading data section
# Load data
tic = time.time()
fname = self.filename
# choose channel to extract features from
ch = self.channel
fs = self.fs #Hz
T = self.T #sec
# Split train and test
with h5py.File(fname, 'r') as rf:
xtest = rf['.']['x_test_' + ch].value
xtrain = rf['.']['x_train_' + ch].value
ytest = rf['.']['y_test_' + ch].value
ytrain = rf['.']['y_train_' + ch].value
print('train and test data loaded in : {} secs'.format(time.time()-tic))
# Flatten data for filter and normalization
X_train = np.reshape(xtrain, (np.shape(xtrain)[0] * np.shape(xtrain)[1] ,1))
X_test = np.reshape(xtest, (np.shape(xtest)[0] * np.shape(xtest)[1] ,1))
#%% Filtering section
## Defining preprocessing function ##
def butter_bandpass_filter(data, lowcut, highcut, fs, order = 2):
nyq = 0.5 * fs
low = lowcut /nyq
high = highcut/nyq
b, a = butter(order, [low, high], btype='band')
#print(b,a)
y = lfilter(b, a, data)
return y
# Apply filter
X_train = butter_bandpass_filter(data=X_train, lowcut=.3, highcut=20, fs=fs, order=2)
X_test = butter_bandpass_filter(data=X_test , lowcut=.3, highcut=20, fs=fs, order=2)
#%% Normalization section - DEACTIVATED
#sc = StandardScaler()
#X_train = sc.fit_transform(X_train)
#X_test = sc.transform(X_test)
#%% Reshaping data per epoch
X_train = np.reshape(X_train, (int(len(X_train) / (fs*T)), fs*T))
X_test = np.reshape(X_test, (int(len(X_test) / (fs*T)), fs*T))
X = np.concatenate((X_train, X_test))
Y = np.concatenate((ytrain, ytest))
#%% Feature Extraction section
# Defining EEG bands:
eeg_bands = {'Delta' : (0.5, 4),
'Theta' : (4 , 8),
'Alpha' : (8 , 12),
'Beta' : (12 , 20),
'Sigma' : (12 , 16),
'Sigma_slow': (12 , 14),
'Sigma_fast': (14 , 16)}
# Initializing variables of interest
eeg_band_fft = dict()
freq_ix = dict()
Features = np.empty((0, 42))
# Settings of peridogram
Window = 'hann'
# zero-padding added with respect to (Nfft=2^(nextpow2(len(window))))
Nfft = 2 ** 15
# Defining freq. resolution
fm, _ = periodogram(x = X[0,:], fs = fs, nfft = Nfft , window = Window)
tic = time.time()
# Finding the index of different freq bands with respect to "fm" #
for band in eeg_bands:
freq_ix[band] = np.where((fm >= eeg_bands[band][0]) &
(fm <= eeg_bands[band][1]))[0]
print('Feature extraction started ... Please wait ...')
# Defining for loop to extract features per epoch
for i in np.arange(len(X)):
data = X[i,:]
# Compute the "total" power inside the investigational window
_ , pxx = periodogram(x = data, fs = fs, nfft = Nfft , window = Window)
# Initialization for wavelet
cA_values = []
cD_values = []
cA_mean = []
cA_std = []
cA_Energy = []
cD_mean = []
cD_std = []
cD_Energy = []
Entropy_D = []
Entropy_A = []
first_diff = np.zeros(len(data)-1)
'''Power in different freq ranges'''
# Total power is defined from 0.5 - 20 Hz
pow_total = np.sum(pxx[np.arange(freq_ix['Delta'][0], freq_ix['Beta'][-1]+1)])
Pow_Delta = np.sum(pxx[freq_ix['Delta']]) / pow_total
Pow_Theta = np.sum(pxx[freq_ix['Theta']]) / pow_total
Pow_Alpha = np.sum(pxx[freq_ix['Alpha']]) / pow_total
Pow_Beta = np.sum(pxx[freq_ix['Beta']]) / pow_total
Pow_Sigma = np.sum(pxx[freq_ix['Sigma']]) / pow_total
Pow_Sigma_slow = np.sum(pxx[freq_ix['Sigma_slow']]) / pow_total
Pow_Sigma_fast = np.sum(pxx[freq_ix['Sigma_fast']]) / pow_total
'''Apply Welch to see the dominant Max power in each freq band'''
ff, Psd = welch(x = data, fs = fs, window = 'hann', nperseg= 512, nfft = Nfft)
Pow_max_Total = np.max(Psd[np.arange(freq_ix['Delta'][0], freq_ix['Beta'][-1]+1)])
Pow_max_Delta = np.max(Psd[freq_ix['Delta']])
Pow_max_Theta = np.max(Psd[freq_ix['Theta']])
Pow_max_Alpha = np.max(Psd[freq_ix['Alpha']])
Pow_max_Beta = np.max(Psd[freq_ix['Beta']])
Pow_max_Sigma = np.max(Psd[freq_ix['Sigma']])
Pow_max_Sigma_slow = np.max(Psd[freq_ix['Sigma_slow']])
Pow_max_Sigma_fast = np.max(Psd[freq_ix['Sigma_fast']])
''' Spectral Entropy '''
Entropy_Welch = spectral_entropy(x = data, sf=fs, method='welch', nperseg = 512)
Entropy_fft = spectral_entropy(x = data, sf=fs, method='fft')
''' Wavelet Decomposition '''
cA,cD=pywt.dwt(data,'coif1')
cA_values.append(cA)
cD_values.append(cD)
cA_mean.append(np.mean(cA_values))
cA_std.append(np.std(cA_values))
cA_Energy.append(np.sum(np.square(cA_values)))
cD_mean.append(np.mean(cD_values))
cD_std.append(np.std(cD_values))
cD_Energy.append(np.sum(np.square(cD_values)))
Entropy_D.append(np.sum(np.square(cD_values) * np.log(np.square(cD_values))))
Entropy_A.append(np.sum(np.square(cA_values) * np.log(np.square(cA_values))))
''' Hjorth Parameters '''
hjorth_activity = np.var(data)
diff_input = np.diff(data)
diff_diffinput = np.diff(diff_input)
hjorth_mobility = np.sqrt(np.var(diff_input)/hjorth_activity)
hjorth_diffmobility = np.sqrt(np.var(diff_diffinput)/np.var(diff_input))
hjorth_complexity = hjorth_diffmobility / hjorth_mobility
''' Statistical features'''
Kurt = kurtosis(data, fisher = False)
Skewness = skew(data)
Mean = np.mean(data)
Median = np.median(data)
Std = np.std(data)
''' Coefficient of variation '''
coeff_var = Std / Mean
''' First and second difference mean and max '''
sum1 = 0.0
sum2 = 0.0
Max1 = 0.0
Max2 = 0.0
for j in range(len(data)-1):
sum1 += abs(data[j+1]-data[j])
first_diff[j] = abs(data[j+1]-data[j])
if first_diff[j] > Max1:
Max1 = first_diff[j] # fi
for j in range(len(data)-2):
sum2 += abs(first_diff[j+1]-first_diff[j])
if abs(first_diff[j+1]-first_diff[j]) > Max2 :
Max2 = first_diff[j+1]-first_diff[j]
diff_mean1 = sum1 / (len(data)-1)
diff_mean2 = sum2 / (len(data)-2)
diff_max1 = Max1
diff_max2 = Max2
''' Variance and Mean of Vertex to Vertex Slope '''
t_max = argrelextrema(data, np.greater)[0]
amp_max = data[t_max]
t_min = argrelextrema(data, np.less)[0]
amp_min = data[t_min]
tt = np.concatenate((t_max, t_min), axis=0)
# Created by <NAME> on 20190314
#
# Kernel methods for 2048.
#
import numpy as np
class My2048:
"""
My 2048 class.
"""
def __init__(self, game_size):
"""
:param game_size: 2D tuple for the game size.
"""
self.game_size = tuple(game_size)
self.checkerboard = np.zeros(self.game_size, dtype=np.int32)
# Spawn two blocks.
for _ in range(2):
self._spawn_new()
def __str__(self):
return str(self.checkerboard)
def _spawn_new(self):
"""Spawn a new block.
:return: True if success.
"""
# Get all indexes with 0 value.
zero_indexes = list(zip(*np.where(self.checkerboard == 0)))
if not len(zero_indexes):
return False
# Randomly pick up an index for spawning.
target_index = np.random.randint(0, len(zero_indexes))
# Spawn.
self.checkerboard[zero_indexes[target_index]] = 2 if np.random.random() < 0.75 else 4
return True
def _move_left(self):
"""Take a left move action. All other actions are based on this method.
:return: True if moved.
"""
is_moved = False
for x in range(self.game_size[0]):
has_merged_list = np.zeros(self.game_size[1], dtype=np.bool)
for y in range(self.game_size[1]):
# Skip 0 cells.
if not self.checkerboard[x, y]:
continue
# Scan to the most left.
for j in range(y, 0, -1):
if not self.checkerboard[x][j-1]:
is_moved = True
self.checkerboard[x][j-1] = self.checkerboard[x][j]
self.checkerboard[x][j] = 0
elif (self.checkerboard[x][j-1] == self.checkerboard[x][j]) and not has_merged_list[j-1]:
# Merge if possible.
has_merged_list[j-1] = True
self.checkerboard[x][j-1] *= 2
self.checkerboard[x][j] = 0
return is_moved
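# Illustrative worked example (not in the original): a left move on the row
# [2, 2, 4, 0] first merges the leading pair into 4 and then slides the trailing
# 4 next to it, giving [4, 4, 0, 0]; has_merged_list stops the freshly merged 4
# from merging again with the slid 4 within the same move.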
def move(self, direction, no_spawn=False):
"""
Take a move.
:param direction: "up", "down", "left", "right".
:param no_spawn: Do not spawn new block if True.
:return: None.
"""
# Rotate the checkerboard.
rotation_mapping = {
"left": 0,
"up": 1,
"right": 2,
"down": 3,
}
self.checkerboard = np.rot90(self.checkerboard, k=rotation_mapping[direction], axes=(0, 1))
"""
data generator for feeding data into pytorch models
NOTE
----
In order to avoid potential error in the methods of slicing signals and rr intervals,
one can check using the following code
```python
from cfg import TrainCfg
ds_train = CPSC2021(TrainCfg, task="qrs_detection", training=True)
ds_val = CPSC2021(TrainCfg, task="qrs_detection", training=False)
err_list = []
for idx, seg in enumerate(ds_train.segments):
sig, lb = ds_train[idx]
if sig.shape != (2,6000) or lb.shape != (750, 1):
print("\n"+f"segment {seg} has sig.shape = {sig.shape}, lb.shape = {lb.shape}"+"\n")
err_list.append(seg)
print(f"{idx+1}/{len(ds_train)}", end="\r")
for idx, seg in enumerate(ds_val.segments):
sig, lb = ds_val[idx]
if sig.shape != (2,6000) or lb.shape != (750, 1):
print("\n"+f"segment {seg} has sig.shape = {sig.shape}, lb.shape = {lb.shape}"+"\n")
err_list.append(seg)
print(f"{idx+1}/{len(ds_val)}", end="\r")
for idx, seg in enumerate(err_list):
path = ds_train._get_seg_data_path(seg)
os.remove(path)
path = ds_train._get_seg_ann_path(seg)
os.remove(path)
print(f"{idx+1}/{len(err_list)}", end="\r")
```
and similarly for the task of `rr_lstm`
"""
import os, sys
import json
import re
import time
import multiprocessing as mp
import random
from itertools import repeat
from copy import deepcopy
from typing import Union, Optional, List, Tuple, Dict, Sequence, Set, NoReturn
import numpy as np
np.set_printoptions(precision=5, suppress=True)
from scipy import signal as SS
try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
from tqdm import tqdm
import torch
from torch.utils.data.dataset import Dataset
from scipy.io import loadmat, savemat
try:
import torch_ecg
except ModuleNotFoundError:
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from torch_ecg.cfg import CFG
from torch_ecg.databases import CPSC2021 as CR
from torch_ecg._preprocessors import PreprocManager
from torch_ecg.utils.utils_interval import mask_to_intervals
from torch_ecg.utils.utils_signal import normalize, remove_spikes_naive
from torch_ecg.utils.misc import (
list_sum, nildent, uniform,
get_record_list_recursive3,
)
from cfg import (
TrainCfg, ModelCfg,
)
if ModelCfg.torch_dtype == torch.float64:
torch.set_default_tensor_type(torch.DoubleTensor)
__all__ = [
"CPSC2021",
]
class CPSC2021(Dataset):
"""
1. ECGs are preprocessed and stored in one folder
2. preprocessed ECGs are sliced with overlap to generate data and label for different tasks:
the data files stores segments of fixed length of preprocessed ECGs,
the annotation files contain "qrs_mask", and "af_mask"
"""
__DEBUG__ = False
__name__ = "CPSC2021"
def __init__(self, config:CFG, task:str, training:bool=True, lazy:bool=True) -> NoReturn:
""" finished, checked,
Parameters
----------
config: dict,
configurations for the Dataset,
ref. `cfg.TrainCfg`
training: bool, default True,
if True, the training set will be loaded, otherwise the test set
"""
super().__init__()
self.config = deepcopy(config)
self.reader = CR(db_dir=config.db_dir)
if self.config.torch_dtype == torch.float64:
self.dtype = np.float64
else:
self.dtype = np.float32
self.allowed_preproc = list(set(["bandpass", "baseline_remove",]).intersection(set(self.config.keys())))
self.training = training
self.lazy = lazy
ppm_config = CFG(random=False)
ppm_config.update(deepcopy(self.config))
ppm_config.pop("normalize")
seg_ppm_config = CFG(random=False)
seg_ppm_config.update(deepcopy(self.config))
seg_ppm_config.pop("bandpass")
self.ppm = PreprocManager.from_config(ppm_config)
self.seg_ppm = PreprocManager.from_config(seg_ppm_config)
# create directories if needed
# preprocess_dir stores pre-processed signals
self.preprocess_dir = os.path.join(config.db_dir, "preprocessed")
os.makedirs(self.preprocess_dir, exist_ok=True)
# segments_dir for sliced segments of fixed length
self.segments_base_dir = os.path.join(config.db_dir, "segments")
os.makedirs(self.segments_base_dir, exist_ok=True)
self.segment_name_pattern = "S_\d{1,3}_\d{1,2}_\d{7}"
self.segment_ext = "mat"
# rr_dir for sequence of rr intervals of fix length
self.rr_seq_base_dir = os.path.join(config.db_dir, "rr_seq")
os.makedirs(self.rr_seq_base_dir, exist_ok=True)
self.rr_seq_name_pattern = "R_\d{1,3}_\d{1,2}_\d{7}"
self.rr_seq_ext = "mat"
self._all_data = None
self._all_labels = None
self._all_masks = None
self.__set_task(task, lazy=self.lazy)
def _load_all_data(self) -> NoReturn:
"""
"""
self.__set_task(self.task, lazy=False)
def __set_task(self, task:str, lazy:bool=True) -> NoReturn:
""" finished, checked,
Parameters
----------
task: str,
name of the task, can be one of `TrainCfg.tasks`
"""
assert task.lower() in TrainCfg.tasks, f"illegal task \042{task}\042"
if hasattr(self, "task") and self.task == task.lower() and self._all_data is not None and len(self._all_data)>0:
return
self.task = task.lower()
self.all_classes = self.config[task].classes
self.n_classes = len(self.config[task].classes)
self.lazy = lazy
self.seglen = self.config[task].input_len # alias, for simplicity
split_res = self._train_test_split(
train_ratio=self.config.train_ratio,
force_recompute=False,
)
if self.training:
self.subjects = split_res.train
else:
self.subjects = split_res.test
if self.task in ["qrs_detection", "main",]:
# for qrs detection, or for the main task
self.segments_dirs = CFG()
self.__all_segments = CFG()
self.segments_json = os.path.join(self.segments_base_dir, "segments.json")
self._ls_segments()
self.segments = list_sum([self.__all_segments[subject] for subject in self.subjects])
if self.__DEBUG__:
self.segments = random.sample(self.segments, int(len(self.segments) * 0.01))
if self.training:
random.shuffle(self.segments)
# preload data
self.fdr = FastDataReader(self.config, self.task, self.seg_ppm, self.segments_dirs, self.segments, self.segment_ext)
if self.lazy:
return
self._all_data, self._all_labels, self._all_masks = [], [], []
with tqdm(range(len(self.fdr)), desc="Loading data", unit="records") as pbar:
for idx in pbar:
d, l, m = self.fdr[idx]
self._all_data.append(d)
self._all_labels.append(l)
self._all_masks.append(m)
self._all_data = np.array(self._all_data).astype(self.dtype)
self._all_labels = np.array(self._all_labels).astype(self.dtype)
if self.task == "qrs_detection":
self._all_masks = None
else:
self._all_masks = np.array(self._all_masks).astype(self.dtype)
elif self.task in ["rr_lstm",]:
self.rr_seq_dirs = CFG()
self.__all_rr_seq = CFG()
self.rr_seq_json = os.path.join(self.rr_seq_base_dir, "rr_seq.json")
self._ls_rr_seq()
self.rr_seq = list_sum([self.__all_rr_seq[subject] for subject in self.subjects])
if self.__DEBUG__:
self.rr_seq = random.sample(self.rr_seq, int(len(self.rr_seq) * 0.01))
if self.training:
random.shuffle(self.rr_seq)
# preload data
self.fdr = FastDataReader(self.config, self.task, self.seg_ppm, self.rr_seq_dirs, self.rr_seq, self.rr_seq_ext)
if self.lazy:
return
self._all_data, self._all_labels, self._all_masks = [], [], []
with tqdm(range(len(self.fdr)), desc="Loading data", unit="records") as pbar:
for idx in pbar:
d, l, m = self.fdr[idx]
self._all_data.append(d)
self._all_labels.append(l)
self._all_masks.append(m)
self._all_data = np.array(self._all_data).astype(self.dtype)
self._all_labels = np.array(self._all_labels)
import os
import torch
import numpy as np
import scipy.misc
import imageio
import torch.nn.functional as F
from PIL import Image
from utils.dcrf import crf_inference
from datasets.pascal_voc_ms import MultiscaleLoader, CropLoader
class ResultWriter:
def __init__(self, cfg, palette, out_path, verbose=True):
self.cfg = cfg
self.palette = palette
self.root = out_path
self.verbose = verbose
def _mask_overlay(self, mask, image, alpha=0.3):
"""Creates an overlayed mask visualisation"""
mask_rgb = self.__mask2rgb(mask)
return alpha * image + (1 - alpha) * mask_rgb
def __mask2rgb(self, mask):
im = Image.fromarray(mask).convert("P")
im.putpalette(self.palette)
mask_rgb = np.array(im.convert("RGB"), dtype=np.float)
return mask_rgb / 255.
def _merge_masks(self, masks, labels, pads):
"""Combines masks at multiple scales
Args:
masks: list of masks obtained at different scales
(already scaled to the original)
Returns:
pred: combined single mask
pred_crf: refined mask with CRF
"""
raise NotImplementedError
def save(self, img_path, img_orig, all_masks, labels, pads, gt_mask):
img_name = os.path.basename(img_path).rstrip(".jpg")
# converting original image to [0, 255]
img_orig255 = np.round(255. * img_orig).astype(np.uint8)
img_orig255 = np.transpose(img_orig255, [1,2,0])
img_orig255 = np.ascontiguousarray(img_orig255)
merged_mask = self._merge_masks(all_masks, pads, labels, img_orig255.shape[:2])
pred = np.argmax(merged_mask, 0)
# CRF
pred_crf = crf_inference(img_orig255, merged_mask, t=10, scale_factor=1, labels=21)
pred_crf = np.argmax(pred_crf, 0)
filepath = os.path.join(self.root, img_name + '.png')
# scipy.misc.imsave(filepath, pred.astype(np.uint8))
imageio.imsave(filepath, pred.astype(np.uint8))
filepath = os.path.join(self.root, "crf", img_name + '.png')
# scipy.misc.imsave(filepath, pred_crf.astype(np.uint8))
imageio.imsave(filepath, pred_crf.astype(np.uint8))
if self.verbose:
mask_gt = gt_mask.numpy()
masks_all = np.concatenate([pred, pred_crf, mask_gt], 1).astype(np.uint8)
images = np.concatenate([img_orig]*3, 2)
# -*- coding: utf-8 -*-
from collections import defaultdict, OrderedDict
from itertools import permutations
import math
import pytest
try:
import numpy as np
except ImportError:
np = None
from chempy import Equilibrium, Reaction, ReactionSystem, Substance
from chempy.thermodynamics.expressions import MassActionEq
from chempy.units import (
SI_base_registry,
get_derived_unit,
allclose,
units_library,
linspace,
to_unitless,
default_constants as const,
default_units as u,
)
from chempy.util._expr import Expr
from chempy.util.testing import requires
from .test_rates import _get_SpecialFraction_rsys
from ..arrhenius import ArrheniusParam
from ..rates import Arrhenius, MassAction, Radiolytic, RampedTemp
from .._rates import ShiftedTPoly
from ..ode import (
get_odesys,
chained_parameter_variation,
_mk_dedim,
_create_odesys as create_odesys,
)
from ..integrated import dimerization_irrev, binary_rev
@requires("numpy", "pyodesys")
def test_get_odesys_1():
k = 0.2
a = Substance("A")
b = Substance("B")
r = Reaction({"A": 1}, {"B": 1}, param=k)
rsys = ReactionSystem([r], [a, b])
assert sorted(rsys.substances.keys()) == ["A", "B"]
odesys = get_odesys(rsys, include_params=True)[0]
c0 = {
"A": 1.0,
"B": 3.0,
}
t = np.linspace(0.0, 10.0)
xout, yout, info = odesys.integrate(t, c0)
yref = np.zeros((t.size, 2))
yref[:, 0] = np.exp(-k * t)
yref[:, 1] = 4 - np.exp(-k * t)
assert np.allclose(yout, yref)
@requires("numpy", "pyodesys", "sympy")
def test_get_odesys__rate_exprs_cb():
k = 0.2
a = Substance("A")
b = Substance("B")
r = Reaction({"A": 1}, {"B": 1}, param=k)
rsys = ReactionSystem([r], [a, b])
assert sorted(rsys.substances.keys()) == ["A", "B"]
odesys, extra = get_odesys(rsys)
c0 = {"A": 1.0, "B": 3.0}
t = np.linspace(0.0, 10.0)
res = odesys.integrate(t, c0)
yref = np.zeros((t.size, 2))
yref[:, 0] = np.exp(-k * t)
yref[:, 1] = 4 - np.exp(-k * t)
assert np.allclose(res.yout, yref)
rate = extra["rate_exprs_cb"](res.xout, res.yout, res.params)
assert np.allclose(rate[:, 0], k * yref[:, 0])
@requires("numpy", "pyodesys")
def test_get_odesys_2():
g = Radiolytic([3.14])
a = Substance("A")
b = Substance("B")
r = Reaction({"A": 1}, {"B": 1}, param=g)
rsys = ReactionSystem([r], [a, b])
odesys = get_odesys(rsys, include_params=True)[0]
c0 = {
"A": 1.0,
"B": 3.0,
}
t = np.linspace(0.0, 0.1)
xout, yout, info = odesys.integrate(
t, rsys.as_per_substance_array(c0), {"doserate": 2.72, "density": 0.998}
)
yref = np.zeros((t.size, 2))
k = 3.14 * 2.72 * 0.998
yref[:, 0] = 1 - k * t
yref[:, 1] = 3 + k * t
assert np.allclose(yout, yref)
@requires(units_library, "pyodesys")
def test_get_odesys_3():
M = u.molar
s = u.second
mol = u.mol
m = u.metre
substances = list(map(Substance, "H2O H+ OH-".split()))
dissociation = Reaction({"H2O": 1}, {"H+": 1, "OH-": 1}, 2.47e-5 / s)
recombination = Reaction({"H+": 1, "OH-": 1}, {"H2O": 1}, 1.37e11 / M / s)
rsys = ReactionSystem([dissociation, recombination], substances)
odesys = get_odesys(
rsys, include_params=True, unit_registry=SI_base_registry, output_conc_unit=M
)[0]
c0 = {"H2O": 55.4 * M, "H+": 1e-7 * M, "OH-": 1e-4 * mol / m ** 3}
x, y, p = odesys.to_arrays(
-42 * u.second, rsys.as_per_substance_array(c0, unit=M), ()
)
fout = odesys.f_cb(x, y, p)
time_unit = get_derived_unit(SI_base_registry, "time")
conc_unit = get_derived_unit(SI_base_registry, "concentration")
r1 = to_unitless(55.4 * 2.47e-5 * M / s, conc_unit / time_unit)
r2 = to_unitless(1e-14 * 1.37e11 * M / s, conc_unit / time_unit)
assert np.all(abs(fout[:, 0] - r2 + r1)) < 1e-10
assert np.all(abs(fout[:, 1] - r1 + r2)) < 1e-10
assert np.all(abs(fout[:, 2] - r1 + r2)) < 1e-10
@requires(units_library, "pyodesys")
def test_get_odesys__with_units():
a = Substance("A")
b = Substance("B")
molar = u.molar
second = u.second
r = Reaction({"A": 2}, {"B": 1}, param=1e-3 / molar / second)
rsys = ReactionSystem([r], [a, b])
odesys = get_odesys(rsys, include_params=True, unit_registry=SI_base_registry)[0]
c0 = {"A": 13 * u.mol / u.metre ** 3, "B": 0.2 * u.molar}
conc_unit = get_derived_unit(SI_base_registry, "concentration")
t = np.linspace(0, 10) * u.hour
xout, yout, info = odesys.integrate(
t, rsys.as_per_substance_array(c0, unit=conc_unit), atol=1e-10, rtol=1e-12
)
t_unitless = to_unitless(xout, u.second)
Aref = dimerization_irrev(t_unitless, 1e-6, 13.0)
# Aref = 1/(1/13 + 2*1e-6*t_unitless)
yref = np.zeros((xout.size, 2))
yref[:, 0] = Aref
yref[:, 1] = 200 + (13 - Aref) / 2
assert allclose(yout, yref * conc_unit)
@requires("numpy", "pyodesys")
def test_SpecialFraction():
k, kprime = 3.142, 2.718
rsys = _get_SpecialFraction_rsys(k, kprime)
odesys = get_odesys(rsys, include_params=True)[0]
c0 = {"H2": 13, "Br2": 17, "HBr": 19}
r = k * c0["H2"] * c0["Br2"] ** (3 / 2) / (c0["Br2"] + kprime * c0["HBr"])
ref = rsys.as_per_substance_array({"H2": -r, "Br2": -r, "HBr": 2 * r})
res = odesys.f_cb(0, rsys.as_per_substance_array(c0))
assert np.allclose(res, ref)
@requires(units_library, "pyodesys")
def test_SpecialFraction_with_units():
k, kprime = 3.142 * u.s ** -1 * u.molar ** -0.5, 2.718
rsys = _get_SpecialFraction_rsys(k, kprime)
odesys = get_odesys(rsys, include_params=True, unit_registry=SI_base_registry)[0]
c0 = {"H2": 13 * u.molar, "Br2": 16 * u.molar, "HBr": 19 * u.molar}
r = k * c0["H2"] * c0["Br2"] ** (3 / 2) / (c0["Br2"] + kprime * c0["HBr"])
conc_unit = u.mol / u.metre ** 3
rate_unit = conc_unit / u.second
ref = rsys.as_per_substance_array(
{"H2": -r, "Br2": -r, "HBr": 2 * r}, unit=rate_unit
)
res = odesys.f_cb(0, rsys.as_per_substance_array(c0, unit=conc_unit))
assert allclose(to_unitless(ref, rate_unit), res)
@requires("pyodesys")
def test_ode_with_global_parameters():
ratex = MassAction(Arrhenius([1e10, 40e3 / 8.3145]))
rxn = Reaction({"A": 1}, {"B": 1}, ratex)
rsys = ReactionSystem([rxn], "A B")
odesys, extra = get_odesys(rsys, include_params=False)
param_keys, unique_keys, p_units = map(
extra.get, "param_keys unique p_units".split()
)
conc = {"A": 3, "B": 5}
x, y, p = odesys.to_arrays(-37, conc, {"temperature": 298.15})
fout = odesys.f_cb(x, y, p)
ref = 3 * 1e10 * np.exp(-40e3 / 8.3145 / 298.15)
assert np.all(abs((fout[:, 0] + ref) / ref) < 1e-14)
assert np.all(abs((fout[:, 1] - ref) / ref) < 1e-14)
@requires("pyodesys")
def test_get_ode__ArrheniusParam():
rxn = Reaction({"A": 1}, {"B": 1}, None)
rxn.param = ArrheniusParam(1e10, 40e3)
rsys = ReactionSystem([rxn], "A B")
odesys = get_odesys(rsys, include_params=True)[0]
conc = {"A": 3, "B": 5}
x, y, p = odesys.to_arrays(-37, conc, {"temperature": 200})
fout = odesys.f_cb(x, y, p)
ref = 3 * 1e10 * np.exp(-40e3 / 8.314472 / 200)
assert np.all(abs((fout[:, 0] + ref) / ref) < 1e-14)
assert np.all(abs((fout[:, 1] - ref) / ref) < 1e-14)
@requires("pyodesys")
def test_get_ode__Radiolytic():
rad = Radiolytic([2.4e-7])
rxn = Reaction({"A": 4, "B": 1}, {"C": 3, "D": 2}, rad)
rsys = ReactionSystem([rxn], "A B C D")
odesys = get_odesys(rsys, include_params=True)[0]
c = {"A": 3, "B": 5, "C": 11, "D": 13}
x, y, p = odesys.to_arrays(-37, c, {"doserate": 0.4, "density": 0.998})
fout = odesys.f_cb(x, y, p)
r = 2.4e-7 * 0.4 * 0.998
ref = [-4 * r, -r, 3 * r, 2 * r]
assert np.all(abs((fout - ref) / ref) < 1e-14)
@requires("pyodesys", units_library)
def test_get_ode__Radiolytic__units():
rad = Radiolytic([2.4e-7 * u.mol / u.joule])
rxn = Reaction({"A": 4, "B": 1}, {"C": 3, "D": 2}, rad)
rsys = ReactionSystem([rxn], "A B C D")
odesys = get_odesys(rsys, include_params=True, unit_registry=SI_base_registry)[0]
conc = {"A": 3 * u.molar, "B": 5 * u.molar, "C": 11 * u.molar, "D": 13 * u.molar}
x, y, p = odesys.to_arrays(
-37 * u.second,
conc,
{
"doserate": 0.4 * u.gray / u.second,
"density": 0.998 * u.kg / u.decimetre ** 3,
},
)
fout = odesys.f_cb(x, y, p) # f_cb does not carry any units
r = 2.4e-7 * 0.4 * 0.998 * 1e3 # mol/m3
ref = [-4 * r, -r, 3 * r, 2 * r]
assert np.all(abs((fout - ref) / ref) < 1e-14)
@requires("pyodesys", units_library)
def test_get_ode__Radiolytic__units__multi():
rad = Radiolytic([2.4e-7 * u.mol / u.joule])
rxn = Reaction({"A": 4, "B": 1}, {"C": 3, "D": 2}, rad)
rsys = ReactionSystem([rxn], "A B C D")
odesys = get_odesys(rsys, include_params=True, unit_registry=SI_base_registry)[0]
conc = {"A": 3 * u.molar, "B": 5 * u.molar, "C": 11 * u.molar, "D": 13 * u.molar}
doserates = [dr * u.gray / u.second for dr in [0.1, 0.2, 0.3, 0.4]]
results = odesys.integrate(
37 * u.second,
conc,
{"doserate": doserates, "density": 0.998 * u.kg / u.decimetre ** 3},
)
assert len(results) == 4
for i, r in enumerate(results):
dr = r.params[odesys.param_names.index("doserate")]
assert dr.ndim == 0 or len(dr) == 1
assert dr == doserates[i]
class Density(Expr):
""" Arguments: rho0 drhodT T0 """
parameter_keys = ("temperature",)
kw = {}
def __call__(self, variables, backend=None):
rho0, drhodT, T0 = self.all_args(variables)
return rho0 + drhodT * (variables["temperature"] - T0)
@requires("pyodesys")
def test_get_ode__Radiolytic__substitutions():
rad = Radiolytic([2.4e-7])
rxn = Reaction({"A": 4, "B": 1}, {"C": 3, "D": 2}, rad)
rsys = ReactionSystem([rxn], "A B C D")
substance_rho = Density([1, -1e-3, 273.15])
odesys = get_odesys(
rsys, include_params=True, substitutions={"density": substance_rho}
)[0]
conc = {"A": 3, "B": 5, "C": 11, "D": 13}
state = {"doserate": 0.4, "temperature": 298.15}
x, y, p = odesys.to_arrays(-37, conc, state)
fout = odesys.f_cb(x, y, p)
r = 2.4e-7 * 0.4 * substance_rho({"temperature": 298.15})
ref = [-4 * r, -r, 3 * r, 2 * r]
assert np.all(abs((fout - ref) / ref) < 1e-14)
@requires("pyodesys", units_library)
def test_get_ode__Radiolytic__substitutions__units():
rad = Radiolytic([2.4e-7 * u.mol / u.joule])
rxn = Reaction({"A": 4, "B": 1}, {"C": 3, "D": 2}, rad)
rsys = ReactionSystem([rxn], "A B C D")
g_dm3 = u.gram / u.decimetre ** 3
kg_dm3 = u.kg / u.decimetre ** 3
substance_rho = Density([1 * kg_dm3, -1 * g_dm3 / u.kelvin, 273.15 * u.kelvin])
odesys = get_odesys(
rsys,
include_params=True,
unit_registry=SI_base_registry,
substitutions={"density": substance_rho},
)[0]
conc = {"A": 3 * u.molar, "B": 5 * u.molar, "C": 11 * u.molar, "D": 13 * u.molar}
x, y, p = odesys.to_arrays(
-37 * u.second,
conc,
{"doserate": 0.4 * u.gray / u.second, "temperature": 298.15 * u.kelvin},
)
fout = odesys.f_cb(x, y, p)
r = 2.4e-7 * 0.4 * 0.975 * 1e3 # mol/m3/s
ref = [-4 * r, -r, 3 * r, 2 * r]
assert np.all(abs((fout - ref) / ref) < 1e-14)
@requires("pyodesys", units_library)
def test_get_ode__TPoly():
rate = MassAction(
ShiftedTPoly([273.15 * u.K, 10 / u.molar / u.s, 2 / u.molar / u.s / u.K])
)
rxn = Reaction({"A": 1, "B": 1}, {"C": 3, "D": 2}, rate, {"A": 3})
rsys = ReactionSystem([rxn], "A B C D")
odesys = get_odesys(rsys, unit_registry=SI_base_registry)[0]
conc = {"A": 3 * u.molar, "B": 5 * u.molar, "C": 11 * u.molar, "D": 13 * u.molar}
x, y, p = odesys.to_arrays(-37 * u.second, conc, {"temperature": 298.15 * u.kelvin})
fout = odesys.f_cb(x, y, p)
r = 3 * 5 * (10 + 2 * 25) * 1000 # mol/m3/s
ref = [-4 * r, -r, 3 * r, 2 * r]
assert np.all(abs((fout - ref) / ref) < 1e-14)
@requires("pyodesys", units_library)
def test_get_odesys__time_dep_rate():
class RampedRate(Expr):
argument_names = ("rate_constant", "ramping_rate")
def __call__(self, variables, reaction, backend=math):
rate_constant, ramping_rate = self.all_args(variables, backend=backend)
return rate_constant * ramping_rate * variables["time"]
rate = MassAction(RampedRate([7, 2]))
rxn = Reaction({"A": 1}, {"B": 3}, rate)
rsys = ReactionSystem([rxn], "A B")
odesys = get_odesys(rsys)[0]
conc = {"A": 3, "B": 11}
x, y, p = odesys.to_arrays([5, 13, 17], conc, ())
fout = odesys.f_cb(x, y, p)
r = 2 * 7 * 3
ref = np.array([[-r * 5, -r * 13, -r * 17], [r * 5 * 3, r * 13 * 3, r * 17 * 3]]).T
assert np.allclose(fout, ref)
@requires("pyodesys", units_library)
def test_get_odesys__time_dep_temperature():
import sympy as sp
def refA(t, A0, A, Ea_over_R, T0, dTdt):
T = T0 + dTdt * t
d_Ei = sp.Ei(-Ea_over_R / T0).n(100).round(90) - sp.Ei(-Ea_over_R / T).n(
100
).round(90)
d_Texp = T0 * sp.exp(-Ea_over_R / T0) - T * sp.exp(-Ea_over_R / T)
return A0 * sp.exp(A / dTdt * (Ea_over_R * d_Ei + d_Texp)).n(30)
params = A0, A, Ea_over_R, T0, dTdt = [13, 1e10, 56e3 / 8, 273, 2]
B0 = 11
rate = MassAction(Arrhenius([A, Ea_over_R]))
rxn = Reaction({"A": 1}, {"B": 3}, rate)
rsys = ReactionSystem([rxn], "A B")
rt = RampedTemp([T0, dTdt], ("init_temp", "ramp_rate"))
odesys, extra = get_odesys(rsys, False, substitutions={"temperature": rt})
all_pk, unique, p_units = map(extra.get, "param_keys unique p_units".split())
conc = {"A": A0, "B": B0}
tout = [2, 5, 10]
for ramp_rate in [2, 3, 4]:
unique["ramp_rate"] = ramp_rate
xout, yout, info = odesys.integrate(10, conc, unique, atol=1e-10, rtol=1e-12)
params[-1] = ramp_rate
Aref = np.array([float(refA(t, *params)) for t in xout])
# Aref = 1/(1/13 + 2*1e-6*t_unitless)
yref = np.zeros((xout.size, 2))
yref[:, 0] = Aref
yref[:, 1] = B0 + 3 * (A0 - Aref)
assert allclose(yout, yref)
unique["ramp_rate"] = 2
x, y, p = odesys.to_arrays(tout, conc, unique)
fout = odesys.f_cb(x, y, p)
def r(t):
return A * np.exp(-Ea_over_R / (T0 + dTdt * t)) * A0 # refA(t, *params)
ref = np.array([[-r(2), -r(5), -r(10)], [3 * r(2), 3 * r(5), 3 * r(10)]]).T
assert np.allclose(fout, ref)
@requires("numpy", "pyodesys")
def test_get_odesys__late_binding():
def _gibbs(args, T, R, backend, **kwargs):
H, S = args
return backend.exp(-(H - T * S) / (R * T))
def _eyring(args, T, R, k_B, h, backend, **kwargs):
H, S = args
return k_B / h * T * backend.exp(-(H - T * S) / (R * T))
gibbs_pk = ("temperature", "molar_gas_constant")
eyring_pk = gibbs_pk + ("Boltzmann_constant", "Planck_constant")
GibbsEC = MassActionEq.from_callback(
_gibbs, argument_names=("H", "S"), parameter_keys=gibbs_pk
)
EyringMA = MassAction.from_callback(
_eyring, argument_names=("H", "S"), parameter_keys=eyring_pk
)
uk_equil = ("He_assoc", "Se_assoc")
beta = GibbsEC(unique_keys=uk_equil) # equilibrium parameters
uk_kinet = ("Ha_assoc", "Sa_assoc")
bimol_barrier = EyringMA(unique_keys=uk_kinet) # activation parameters
eq = Equilibrium({"Fe+3", "SCN-"}, {"FeSCN+2"}, beta)
rsys = ReactionSystem(eq.as_reactions(kf=bimol_barrier))
odesys, extra = get_odesys(rsys, include_params=False)
pk, unique, p_units = map(extra.get, "param_keys unique p_units".split())
assert sorted(unique) == sorted(uk_equil + uk_kinet)
assert sorted(pk) == sorted(eyring_pk)
@requires("numpy", "pyodesys")
def test_get_odesys__ScaledSys():
from pyodesys.symbolic import ScaledSys
k = 0.2
a = Substance("A")
b = Substance("B")
r = Reaction({"A": 1}, {"B": 1}, param=k)
rsys = ReactionSystem([r], [a, b])
assert sorted(rsys.substances.keys()) == ["A", "B"]
odesys = get_odesys(rsys, include_params=True, SymbolicSys=ScaledSys)[0]
c0 = {
"A": 1.0,
"B": 3.0,
}
t = np.linspace(0.0, 10.0)
xout, yout, info = odesys.integrate(t, c0)
yref = np.zeros((xout.size, 2))
import itertools
import math
import numpy as np
import pandas as pd
import scipy.stats
import statsmodels.stats.multitest
from figures.figure1 import find_leaves
from figures.util import experiments, mcc, get_subplots, produce_figure, plot_grid, plot_annotations, plot_scatter, \
to_col_name
def generate_qval_data(data):
regs = data.region.unique()
leaves = regs[find_leaves(regs)]
data = data[data.region.isin(leaves)]
regs = leaves
qvals = pd.DataFrame({'region': regs, 'region_name': [r['name'] for r in
mcc.get_structure_tree().get_structures_by_acronym(
regs)]}).set_index('region').sort_index()
strains = {
"BL6": 'C57BL/6J',
"CD1": 'FVB.CD1(ICR)',
"ALL": None
}
params = ['count3d', 'density3d', 'coverage', "volume"]
genders = ['M', 'F']
groups = {
(g, s[0], p): data[(data.gender == g) & (data.strain == s[1])].groupby('region')[to_col_name(data, p)].apply(np.array).sort_index()
for g, s, p in itertools.product(genders, strains.items(), params) if s[1] is not None
}
def mdn(a):
return np.median(a[~np.isnan(a)])
for p in params:
for g in genders:
local_data = pd.DataFrame({'left': groups[(g, 'BL6', p)], 'right': groups[(g, 'CD1', p)]}).sort_index()
pvals = produce_pvals(local_data, scipy.stats.ranksums)
pvals['sign'] = 1
qvals[f"BL6_vs_CD1_{g}_{p}_ranksum_rejected"] = pvals.rejected
qvals[f"BL6_vs_CD1_{g}_{p}_ranksum_qval"] = pvals.qval * pvals.sign
qvals[f"BL6_vs_CD1_{g}_{p}_ranksum_pval"] = pvals.pval * pvals.sign
qvals[f"BL6_vs_CD1_{g}_{p}_ranksum_log10(qval)"] = -np.log10(pvals.pval) * pvals.sign
qvals[f"BL6_vs_CD1_{g}_{p}_effect"] = 2 * (local_data.left.apply(mdn) - local_data.right.apply(mdn)) / (local_data.left.apply(mdn) + local_data.right.apply(mdn))
for strain_name, strain in strains.items():
strain_data = data
if strain is not None:
strain_data = strain_data[strain_data.strain == strain]
males = strain_data[strain_data.gender == 'M']
females = strain_data[strain_data.gender == 'F']
male_groups = males.groupby('region')
female_groups = females.groupby('region')
for param in params:
male_groups_data = male_groups[to_col_name(strain_data, param)]
female_groups_data = female_groups[to_col_name(strain_data, param)]
male_data = male_groups_data.apply(np.array).sort_index()
female_data = female_groups_data.apply(np.array).sort_index()
qvals[f'{strain_name}_{param}_male_means'] = male_groups_data.median().sort_index()
qvals[f'{strain_name}_{param}_female_means'] = female_groups_data.median().sort_index()
qvals[f'{strain_name}_male_vs_female_{param}_effect'] = 2 * (
qvals[f'{strain_name}_{param}_male_means'] - qvals[f'{strain_name}_{param}_female_means']) / (
qvals[f'{strain_name}_{param}_male_means'] + qvals[f'{strain_name}_{param}_female_means'])
for method_name, method in {'t-test': scipy.stats.ttest_ind, 'ranksum': scipy.stats.ranksums}.items():
local_data = pd.DataFrame({'left': male_data, 'right': female_data}).sort_index()
pvals = produce_pvals(local_data, method)
pvals['sign'] = np.sign(
qvals[f'{strain_name}_{param}_male_means'] - qvals[f'{strain_name}_{param}_female_means'])
qvals[f"{strain_name}_male_vs_female_{param}_{method_name}_rejected"] = pvals.rejected
qvals[f"{strain_name}_male_vs_female_{param}_{method_name}_qval"] = pvals.qval * pvals.sign
qvals[f"{strain_name}_male_vs_female_{param}_{method_name}_pval"] = pvals.pval * pvals.sign
qvals[f"{strain_name}_male_vs_female_{param}_{method_name}_log10(qval)"] = -np.log10(
pvals.pval) * pvals.sign
for gender, gender_groups in {'male': male_groups, 'female': female_groups}.items():
for param in ['count', 'density', 'region_area']:
left_data = gender_groups[to_col_name(strain_data, f'{param}_left')].apply(np.array).sort_index()
right_data = gender_groups[to_col_name(strain_data, f'{param}_right')].apply(np.array).sort_index()
qvals[f'{strain_name}_{param}_{gender}_left_means'] = gender_groups[to_col_name(strain_data,
f'{param}_left')].median().sort_index()
qvals[f'{strain_name}_{param}_{gender}_right_means'] = gender_groups[to_col_name(strain_data,
f'{param}_right')].median().sort_index()
qvals[f'{strain_name}_{gender}_left_vs_right_{param}_effect'] = 2 * (
qvals[f'{strain_name}_{param}_{gender}_left_means'] - qvals[f'{strain_name}_{param}_{gender}_right_means']) / (
qvals[f'{strain_name}_{param}_{gender}_left_means'] +
qvals[f'{strain_name}_{param}_{gender}_right_means'])
for method_name, method in {'t-test': scipy.stats.ttest_ind, 'ranksum': scipy.stats.ranksums}.items():
local_data = pd.DataFrame({'left': left_data, 'right': right_data}).sort_index()
pvals = produce_pvals(local_data, method)
pvals['sign'] = np.sign(
qvals[f'{strain_name}_{param}_{gender}_left_means']
- qvals[f'{strain_name}_{param}_{gender}_right_means'])
qvals[f"{strain_name}_left_{param}_{gender}_rejected"] = pvals.rejected
qvals[f"{strain_name}_left_vs_right_{param}_{gender}_{method_name}_rejected"] = pvals.rejected
qvals[f"{strain_name}_left_vs_right_{param}_{gender}_{method_name}_qval"] = pvals.qval * pvals.sign
qvals[f"{strain_name}_left_vs_right_{param}_{gender}_{method_name}_pval"] = pvals.pval * pvals.sign
qvals[f"{strain_name}_left_vs_right_{param}_{gender}_{method_name}_log10(qval)"] = -np.log10(
pvals.qval) * pvals.sign
return qvals
def produce_pvals(local_data, method):
regs, pvals = zip(*[(t.Index, method(t.left[~np.isnan(t.left)], t.right[~np.isnan(t.right)]).pvalue)
if min(t.left[~np.isnan(t.left)].shape[0], t.right[~np.isnan(t.right)].shape[0]) >= 20
else (t.Index, math.nan) for t in local_data.itertuples()])
pvals = np.array(pvals)
rejected, pval_corrected = statsmodels.stats.multitest.fdrcorrection(pvals[~np.isnan(pvals)], alpha=0.1)
rej = np.ones_like(pvals)
rej[~np.isnan(pvals)] = rejected
rej[np.isnan(pvals)] = math.nan
qvals = pvals.copy()
qvals[~np.isnan(pvals)] = pval_corrected
pvals = pd.DataFrame({'region': regs, 'pval': pvals, 'qval': qvals, 'rejected': rej}).set_index(
'region').sort_index()
return pvals
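# Hypothetical usage sketch (mirrors how generate_qval_data builds its inputs;
# the `groups` dict is local to that function): compare BL6 vs CD1 male density
# per region with a rank-sum test and keep Benjamini-Hochberg-corrected hits.
# local = pd.DataFrame({'left': groups[('M', 'BL6', 'density3d')],
#                       'right': groups[('M', 'CD1', 'density3d')]}).sort_index()
# pvals = produce_pvals(local, scipy.stats.ranksums)
# significant_regions = pvals.index[pvals.rejected == 1]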
def plot_qval_vs_qval(qvals, strain, prefix, highlight_col, x_col, y_col, filename):
regions = qvals.index.to_numpy()
fig, ax = get_subplots()
highlight = qvals[strain + '_' + prefix + "_" + highlight_col + '_qval'].to_numpy().copy()
highlight_rejected = qvals[strain + '_' + prefix + "_" + highlight_col + '_rejected'].to_numpy()
highlight[highlight_rejected == 0] = 1
highlight[np.isnan(highlight)] = 1
x = qvals[strain + '_' + prefix + "_" + x_col + '_log10(qval)'].to_numpy()
y = qvals[strain + '_' + prefix + "_" + y_col + '_log10(qval)'].to_numpy()
non_nans = (~np.isnan(x))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-member
# pylint: disable=not-an-iterable
""" Functions
__author__: <NAME>, <NAME>, <NAME>
"""
import numpy as np
from scipy import special
from pymdp.core import utils
from itertools import chain
EPS_VAL = 1e-16 # global constant for use in spm_log() function
def spm_dot(X, x, dims_to_omit=None):
""" Dot product of a multidimensional array with `x`. The dimensions in `dims_to_omit`
will not be summed across during the dot product
Parameters
----------
- `x` [1D numpy.ndarray] - either vector or array of arrays
The alternative array to perform the dot product with
- `dims_to_omit` [list :: int] (optional)
Which dimensions to omit
Returns
-------
- `Y` [1D numpy.ndarray] - the result of the dot product
"""
# Construct dims to perform dot product on
if utils.is_arr_of_arr(x):
# dims = list((np.arange(0, len(x)) + X.ndim - len(x)).astype(int))
dims = list(range(X.ndim - len(x),len(x)+X.ndim - len(x)))
# dims = list(range(X.ndim))
else:
dims = [1]
x = utils.to_arr_of_arr(x)
if dims_to_omit is not None:
arg_list = [X, list(range(X.ndim))] + list(chain(*([x[xdim_i],[dims[xdim_i]]] for xdim_i in range(len(x)) if xdim_i not in dims_to_omit))) + [dims_to_omit]
else:
arg_list = [X, list(range(X.ndim))] + list(chain(*([x[xdim_i],[dims[xdim_i]]] for xdim_i in range(len(x))))) + [[0]]
Y = np.einsum(*arg_list)
# check to see if `Y` is a scalar
if np.prod(Y.shape) <= 1.0:
Y = Y.item()
Y = np.array([Y]).astype("float64")
return Y
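# Illustrative usage sketch (added for clarity, not part of the original API):
# dotting a 3x4 likelihood array with a length-4 state vector sums over the
# hidden-state dimension and returns a length-3 array of expected observations.
def _demo_spm_dot():
    A = np.full((3, 4), 1.0 / 3.0)        # likelihood P(o | s); columns sum to 1
    qs = np.array([0.1, 0.2, 0.3, 0.4])   # posterior over hidden states
    return spm_dot(A, qs)                 # shape (3,), here ~[1/3, 1/3, 1/3]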
def spm_dot_classic(X, x, dims_to_omit=None):
""" Dot product of a multidimensional array with `x`. The dimensions in `dims_to_omit`
will not be summed across during the dot product
Parameters
----------
- `x` [1D numpy.ndarray] - either vector or array of arrays
The alternative array to perform the dot product with
- `dims_to_omit` [list :: int] (optional)
Which dimensions to omit
Returns
-------
- `Y` [1D numpy.ndarray] - the result of the dot product
"""
# Construct dims to perform dot product on
if utils.is_arr_of_arr(x):
dims = (np.arange(0, len(x)) + X.ndim - len(x)).astype(int)
else:
dims = np.array([1], dtype=int)
x = utils.to_arr_of_arr(x)
# delete ignored dims
if dims_to_omit is not None:
if not isinstance(dims_to_omit, list):
raise ValueError("`dims_to_omit` must be a `list` of `int`")
dims = np.delete(dims, dims_to_omit)
if len(x) == 1:
x = np.empty([0], dtype=object)
else:
x = np.delete(x, dims_to_omit)
# compute dot product
for d in range(len(x)):
s = np.ones(np.ndim(X), dtype=int)
s[dims[d]] = np.shape(x[d])[0]
X = X * x[d].reshape(tuple(s))
# X = np.sum(X, axis=dims[d], keepdims=True)
Y = np.sum(X, axis=tuple(dims.astype(int))).squeeze()
# Y = np.squeeze(X)
# check to see if `Y` is a scalar
if np.prod(Y.shape) <= 1.0:
Y = Y.item()
Y = np.array([Y]).astype("float64")
return Y
def spm_dot_old(X, x, dims_to_omit=None, obs_mode=False):
""" Dot product of a multidimensional array with `x`. The dimensions in `dims_to_omit`
will not be summed across during the dot product
#TODO: we should look for an alternative to obs_mode
Parameters
----------
- `x` [1D numpy.ndarray] - either vector or array of arrays
The alternative array to perform the dot product with
- `dims_to_omit` [list :: int] (optional)
Which dimensions to omit
Returns
-------
- `Y` [1D numpy.ndarray] - the result of the dot product
"""
# Construct dims to perform dot product on
if utils.is_arr_of_arr(x):
dims = (np.arange(0, len(x)) + X.ndim - len(x)).astype(int)
else:
if obs_mode is True:
"""
@NOTE Case when you're getting the likelihood of an observation under
the generative model. Equivalent to something like self.values[np.where(x),:]
when `x` is a discrete 'one-hot' observation vector
"""
dims = np.array([0], dtype=int)
else:
"""
@NOTE Case when `x` leading dimension matches the lagging dimension of `values`
E.g. a more 'classical' dot product of a likelihood with hidden states
"""
dims = np.array([1], dtype=int)
x = utils.to_arr_of_arr(x)
# delete ignored dims
if dims_to_omit is not None:
if not isinstance(dims_to_omit, list):
raise ValueError("`dims_to_omit` must be a `list` of `int`")
dims = np.delete(dims, dims_to_omit)
if len(x) == 1:
x = np.empty([0], dtype=object)
else:
x = np.delete(x, dims_to_omit)
# compute dot product
for d in range(len(x)):
s = np.ones(np.ndim(X), dtype=int)
s[dims[d]] = np.shape(x[d])[0]
X = X * x[d].reshape(tuple(s))
# X = np.sum(X, axis=dims[d], keepdims=True)
Y = np.sum(X, axis=tuple(dims.astype(int))).squeeze()
# Y = np.squeeze(X)
# check to see if `Y` is a scalar
if np.prod(Y.shape) <= 1.0:
Y = Y.item()
Y = np.array([Y]).astype("float64")
return Y
def spm_cross(x, y=None, *args):
""" Multi-dimensional outer product
Parameters
----------
- `x` [np.ndarray] || [Categorical] (optional)
The values to perform the outer-product with. If empty, then the outer-product
is taken between x and itself. If y is not empty, then outer product is taken
between x and the various dimensions of y.
- `args` [np.ndarray] || [Categorical] (optional)
Remaining arrays to perform outer-product with. These extra arrays are recursively
multiplied with the 'initial' outer product (that between x and y).
Returns
-------
- `z` [np.ndarray] || [Categorical]
The result of the outer-product
"""
if len(args) == 0 and y is None:
if utils.is_arr_of_arr(x):
z = spm_cross(*list(x))
elif np.issubdtype(x.dtype, np.number):
z = x
else:
raise ValueError(f"Invalid input to spm_cross ({x})")
return z
if utils.is_arr_of_arr(x):
x = spm_cross(*list(x))
if y is not None and utils.is_arr_of_arr(y):
y = spm_cross(*list(y))
reshape_dims = tuple(list(x.shape) + list(np.ones(y.ndim, dtype=int)))
A = x.reshape(reshape_dims)
reshape_dims = tuple(list(np.ones(x.ndim, dtype=int)) + list(y.shape))
B = y.reshape(reshape_dims)
z = np.squeeze(A * B)
for x in args:
z = spm_cross(z, x)
return z
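# Illustrative usage sketch (added for clarity): the outer product of a
# length-2 and a length-3 vector has shape (2, 3); chaining a third factor of
# length 4 yields shape (2, 3, 4).
def _demo_spm_cross():
    a = np.array([0.5, 0.5])
    b = np.array([0.2, 0.3, 0.5])
    c = np.array([0.25, 0.25, 0.25, 0.25])
    return spm_cross(a, b, c).shape       # (2, 3, 4)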
def dot_likelihood(A,obs):
s = np.ones(np.ndim(A), dtype = int)
s[0] = obs.shape[0]
X = A * obs.reshape(tuple(s))
X = np.sum(X, axis=0, keepdims=True)
LL = np.squeeze(X)
# check to see if `LL` is a scalar
if np.prod(LL.shape) <= 1.0:
LL = LL.item()
LL = np.array([LL]).astype("float64")
return LL
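# Illustrative usage sketch (added for clarity): with a one-hot observation,
# dot_likelihood simply selects the corresponding row of the likelihood A.
def _demo_dot_likelihood():
    A = np.array([[0.9, 0.1], [0.1, 0.9]])   # P(o | s)
    obs = np.array([0.0, 1.0])               # outcome 1, one-hot encoded
    return dot_likelihood(A, obs)            # -> array([0.1, 0.9])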
def get_joint_likelihood(A, obs, num_states):
# deal with single modality case
if type(num_states) is int:
num_states = [num_states]
A = utils.to_arr_of_arr(A)
obs = utils.to_arr_of_arr(obs)
ll = np.ones(tuple(num_states))
for modality in range(len(A)):
ll = ll * dot_likelihood(A[modality], obs[modality])
return ll
def get_joint_likelihood_seq(A, obs, num_states):
ll_seq = np.empty(len(obs), dtype=object)
for t in range(len(obs)):
ll_seq[t] = get_joint_likelihood(A, obs[t], num_states)
return ll_seq
def spm_norm(A):
"""
Returns normalization of Categorical distribution,
stored in the columns of A.
"""
A = A + EPS_VAL
normed_A = np.divide(A, A.sum(axis=0))
return normed_A
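# Illustrative usage sketch (added for clarity): spm_norm rescales every
# column so that it sums to one (up to the EPS_VAL regulariser).
def _demo_spm_norm():
    A = np.array([[1.0, 4.0], [3.0, 4.0]])
    return spm_norm(A)                    # ~[[0.25, 0.5], [0.75, 0.5]]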
def spm_log(arr):
"""
Adds small epsilon value to an array before natural logging it
"""
return np.log(arr + EPS_VAL)
def spm_wnorm(A):
"""
Returns Expectation of logarithm of Dirichlet parameters over a set of
Categorical distributions, stored in the columns of A.
"""
A = A + EPS_VAL
norm = np.divide(1.0, np.sum(A, axis=0))
avg = np.divide(1.0, A)
wA = norm - avg
return wA
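# Illustrative usage sketch (added for clarity): for a column of Dirichlet
# counts, spm_wnorm returns (1 / column_sum) - (1 / count) elementwise.
def _demo_spm_wnorm():
    A = np.array([[1.0], [3.0]])
    return spm_wnorm(A)                   # ~[[-0.75], [-0.0833]]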
def spm_betaln(z):
""" Log of the multivariate beta function of a vector.
@NOTE this function computes across columns if `z` is a matrix
"""
return np.sum(special.gammaln(z), axis=0) - special.gammaln(np.sum(z, axis=0))
def softmax(dist, return_numpy=True):
""" Computes the softmax function on a set of values
"""
if utils.is_distribution(dist):
if dist.IS_AOA:
output = []
for i in range(len(dist.values)):
output.append(softmax(dist.values[i], return_numpy=True))
output = utils.to_categorical(np.array(output))
else:
dist = np.copy(dist.values)
output = dist - dist.max(axis=0)
output = np.exp(output)
output = output / np.sum(output, axis=0)
from coopihc.base.StateElement import StateElement
from coopihc.base.utils import (
StateNotContainedError,
StateNotContainedWarning,
)
from coopihc.base.elements import integer_set, box_space
import numpy
import pytest
import json
import copy
from tabulate import tabulate
def test_array_init_integer():
x = StateElement(2, integer_set(3))
assert hasattr(x, "space")
assert x.shape == ()
assert x == 2
def test_array_init_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
assert hasattr(x, "space")
assert x.shape == (2, 2)
assert (x == numpy.zeros((2, 2))).all()
def test_array_init():
test_array_init_integer()
test_array_init_numeric()
def test_array_init_error_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(4, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="error")
def test_array_init_error_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
with pytest.raises(StateNotContainedError):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
def test_array_init_error():
test_array_init_error_integer()
test_array_init_error_numeric()
def test_array_init_warning_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(4, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="warning")
def test_array_init_warning_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="warning"
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
def test_array_init_warning():
test_array_init_warning_integer()
test_array_init_warning_numeric()
def test_array_init_clip_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(4, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(-3, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([0])
def test_array_init_clip_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="clip"
)
assert (x == numpy.zeros((2, 2))).all()
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == numpy.ones((2, 2))).all()
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == -1.0 * numpy.ones((2, 2))).all()
def test_array_init_clip():
test_array_init_clip_integer()
test_array_init_clip_numeric()
def test_array_init_dtype_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
assert x.dtype == numpy.int64
x = StateElement(2, integer_set(3, dtype=numpy.int16), out_of_bounds_mode="warning")
assert x.dtype == numpy.int16
def test_array_init_dtype_numeric():
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float64
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.float32)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float32
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.int8)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.int8
def test_array_init_dtype():
test_array_init_dtype_integer()
test_array_init_dtype_numeric()
# def test__array_ufunc__discrete():
# # Simple arithmetic
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x + numpy.array(1) == 3
# assert x + 1 == 3
# assert x - 1 == 1
# assert 3 - x == 1
# assert x - numpy.array(1) == 1
# assert numpy.array(3) - x == 1
# assert 1 + x == 3
# x += 1
# y = x - 1
# assert y.out_of_bounds_mode == "error"
# with pytest.raises(StateNotContainedError):
# 1 - x
# with pytest.raises(StateNotContainedError):
# x + 2
# with pytest.raises(StateNotContainedError):
# x += 5
# def test__array_ufunc__continuous():
# # some matrix operations
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x + numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# assert (x + 1 == numpy.ones((2, 2))).all()
# assert (1 + x == numpy.ones((2, 2))).all()
# assert (x - 1 == -numpy.ones((2, 2))).all()
# assert (1 - x == numpy.ones((2, 2))).all()
# assert ((1 + x) * 0.5 == 0.5 * numpy.ones((2, 2))).all()
# assert (0.5 * (1 + x) @ numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# def test__array_ufunc__multidiscrete():
# global multidiscr_space
# x = StateElement([1, 1, 8], multidiscr_space, out_of_bounds_mode="error")
# assert (x + numpy.array([[1], [1], [-3]]) == numpy.array([[2], [2], [5]])).all()
# with pytest.raises(StateNotContainedError):
# x + numpy.array([[1], [1], [1]])
# def test__array_ufunc__comparisons():
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x > 1 == True
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x < 0).all() == False
# global multidiscr_space
# x = StateElement(
# numpy.array([[1], [1], [1]]), multidiscr_space, out_of_bounds_mode="error"
# )
# assert (x >= numpy.array([[1], [0], [1]])).all() == True
# assert (x >= numpy.array([[1], [5], [1]])).all() == False
# comp = x >= numpy.array([[1], [5], [1]])
# assert (comp == numpy.array([[True], [False], [True]])).all()
# def test__array_ufunc__trigonometry():
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (numpy.cos(x) == numpy.ones((2, 2))).all()
# def test__array_ufunc__floating():
# global cont_space
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# assert numpy.isfinite(x).all() == True
# def test__array_ufunc__out_of_bounds_mode():
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# y = StateElement(
# numpy.array([[-0.2, -0.3], [-1, -0.95]]),
# cont_space,
# out_of_bounds_mode="warning",
# )
# z = StateElement(
# numpy.array([[0.0, 0.0], [0.0, 0.0]]),
# cont_space,
# out_of_bounds_mode="silent",
# )
# u = x + y
# assert u.out_of_bounds_mode == "error"
# u = y + x
# assert u.out_of_bounds_mode == "error"
# u = z + x
# assert u.out_of_bounds_mode == "error"
# u = y + z
# assert u.out_of_bounds_mode == "warning"
# u = z + 0
# assert u.out_of_bounds_mode == "silent"
# def test__array_ufunc__():
# test__array_ufunc__discrete()
# test__array_ufunc__continuous()
# test__array_ufunc__multidiscrete()
# test__array_ufunc__comparisons()
# test__array_ufunc__trigonometry()
# test__array_ufunc__floating()
# test__array_ufunc__out_of_bounds_mode()
# def test_amax_nothandled():
# StateElement.HANDLED_FUNCTIONS = {}
# cont_space = autospace(
# [[-1, -1], [-1, -1]], [[1, 1], [1, 1]], dtype=numpy.float64
# ) # Here the
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# # Without handled function
# with pytest.warns(NumpyFunctionNotHandledWarning):
# y = numpy.max(x)
# assert isinstance(y, numpy.ndarray)
# assert not isinstance(y, StateElement)
# assert y == 0.8
# assert not hasattr(y, "space")
# assert not hasattr(y, "out_of_bounds_mode")
# def test_amax_implements_decorator():
# cont_space = autospace([[-1, -1], [-1, -2]], [[1, 1], [1, 3]], dtype=numpy.float64)
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# @StateElement.implements(numpy.amax)
# def amax(arr, **keywordargs):
# space, out_of_bounds_mode, kwargs = (
# arr.space,
# arr.out_of_bounds_mode,
# arr.kwargs,
# )
# obj = arr.view(numpy.ndarray)
# argmax = numpy.argmax(obj, **keywordargs)
# index = numpy.unravel_index(argmax, arr.space.shape)
# obj = numpy.amax(obj, **keywordargs)
# obj = numpy.asarray(obj).view(StateElement)
# if arr.space.space_type == "continuous":
# obj.space = autospace(
# numpy.atleast_2d(arr.space.low[index[0], index[1]]),
# numpy.atleast_2d(arr.space.high[index[0], index[1]]),
# )
# else:
# raise NotImplementedError
# obj.out_of_bounds_mode = arr.out_of_bounds_mode
# obj.kwargs = arr.kwargs
# return obj
# y = numpy.amax(x)
# assert isinstance(y, StateElement)
# assert StateElement.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert x.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert y.shape == ()
# assert y == 0.8
# assert y.space.space_type == "continuous"
# assert y.space.shape == (1, 1)
# assert y.space.low == numpy.array([[-2]])
# assert y.space.high == numpy.array([[3]])
# def test_array_function_simple():
# test_amax_nothandled()
# test_amax_implements_decorator()
# def test__array_function__():
# test_array_function_simple()
def test_equals_integer():
int_space = integer_set(3)
other_int_space = integer_set(4)
x = StateElement(numpy.array(1), int_space)
y = StateElement(numpy.array(1), other_int_space)
assert x.equals(y)
assert not x.equals(y, mode="hard")
z = StateElement(numpy.array(2), int_space)
assert not x.equals(z)
def test_equals_numeric():
numeric_space = box_space(numpy.ones((2, 2)))
other_numeric_space = box_space(
low=numpy.array([[-1, -1], [-1, -2]]), high=numpy.array([[1, 2], [1, 1]])
)
x = StateElement(numpy.zeros((2, 2)), numeric_space)
y = StateElement(numpy.zeros((2, 2)), other_numeric_space)
assert (x.equals(y)).all()
assert not (x.equals(y, mode="hard")).all()
z = StateElement(numpy.eye(2), numeric_space)
assert not (x.equals(z)).all()
def test_equals():
test_equals_integer()
test_equals_numeric()
def test__iter__integer():
x = StateElement([2], integer_set(3))
with pytest.raises(TypeError):
next(iter(x))
def test__iter__numeric():
x = StateElement(
numpy.array([[0.2, 0.3], [0.4, 0.5]]), box_space(numpy.ones((2, 2)))
)
for i, _x in enumerate(x):
if i == 0:
assert (
_x == StateElement(numpy.array([0.2, 0.3]), box_space(numpy.ones((2,))))
).all()
if i == 1:
assert (
_x == StateElement(numpy.array([0.4, 0.5]), box_space(numpy.ones((2,))))
).all()
for j, _xx in enumerate(_x):
print(i, j)
if i == 0 and j == 0:
assert _xx == StateElement(
numpy.array(0.2), box_space(numpy.float64(1))
)
elif i == 0 and j == 1:
assert _xx == StateElement(
numpy.array(0.3), box_space(numpy.float64(1))
)
elif i == 1 and j == 0:
assert _xx == StateElement(
numpy.array(0.4), box_space(numpy.float64(1))
)
elif i == 1 and j == 1:
assert _xx == StateElement(
numpy.array(0.5), box_space(numpy.float64(1))
)
def test__iter__():
test__iter__integer()
test__iter__numeric()
def test__repr__integer():
x = StateElement(2, integer_set(3))
assert x.__repr__() == "StateElement(array(2), CatSet([0 1 2]), 'warning')"
def test__repr__numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
x.__repr__()
def test__repr__():
test__repr__integer()
test__repr__numeric()
def test_serialize_integer():
x = StateElement(numpy.array([2]), integer_set(3))
assert x.serialize() == {
"values": 2,
"space": {
"space": "CatSet",
"seed": None,
"array": [0, 1, 2],
"dtype": "dtype[int64]",
},
}
def test_serialize_numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
assert x.serialize() == {
"values": [[0.0, 0.0], [0.0, 0.0]],
"space": {
"space": "Numeric",
"seed": None,
"low,high": [[[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [1.0, 1.0]]],
"shape": (2, 2),
"dtype": "dtype[float64]",
},
}
def test_serialize():
test_serialize_integer()
test_serialize_numeric()
def test__getitem__integer():
x = StateElement(1, integer_set(3))
assert x[..., {"space": True}] == x
assert x[..., {"space": True}] is x
assert x[...] == x
def test__getitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
assert x[0, 0] == 0.0
assert x[0, 0, {"space": True}] == StateElement(0.0, box_space(numpy.float64(1)))
assert x[0, 1, {"space": True}] == StateElement(0.1, box_space(numpy.float64(1)))
assert x[1, 0, {"space": True}] == StateElement(0.2, box_space(numpy.float64(1)))
assert x[1, 1, {"space": True}] == StateElement(0.3, box_space(numpy.float64(1)))
assert (x[:, 1] == numpy.array([0.1, 0.3])).all()
assert (
x[:, 1, {"space": True}]
== StateElement(numpy.array([0.1, 0.3]), box_space(numpy.ones((2,))))
).all()
x = StateElement(numpy.array(0), box_space(low=-1, high=1))
from coopihc import State
s = State()
s["x"] = x
fd = {"x": ...}
a = s.filter(mode="stateelement", filterdict=fd)
def test__getitem__():
test__getitem__integer()
test__getitem__numeric()
def test__setitem__integer():
x = StateElement(1, integer_set(3))
x[...] = 2
assert x == StateElement(2, integer_set(3))
with pytest.warns(StateNotContainedWarning):
x[...] = 4
def test__setitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
x[0, 0] = 0.5
x[0, 1] = 0.6
x[1, 0] = 0.7
x[1, 1] = 0.8
assert (
x
== StateElement(
numpy.array([[0.5, 0.6], [0.7, 0.8]]), box_space(numpy.ones((2, 2)))
)
).all()
with pytest.warns(StateNotContainedWarning):
x[0, 0] = 1.3
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
x[:, 0] = numpy.array([0.9, 0.9])
x[0, :] = numpy.array([1.2, 0.2])
x[1, 1] = 0.5
assert (
x
== StateElement(
numpy.array([[1, 0.2], [0.9, 0.5]]), box_space(numpy.ones((2, 2)))
)
).all()
#!/usr/bin/env python
"""Make big QUOCKA cubes"""
from IPython import embed
import schwimmbad
import sys
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from radio_beam import Beam, Beams
from radio_beam.utils import BeamError
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
import au2
import scipy.signal
import numpy as np
from functools import partial
import reproject as rpj
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
# Require reproject >= 0.7
try:
assert float(rpj.__version__[0:3]) >= 0.7
except AssertionError:
print('We require reproject version >= 0.7')
print(f'Current version is {rpj.__version__}')
print('Please update reproject!')
quit()
class Error(Exception):
"""Base class for other exceptions"""
pass
class GridError(Error):
"""Raised when grid is too coarse for the convolving beam"""
pass
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return np.ceil(n * multiplier) / multiplier
def my_ceil(a, precision=0):
return np.round(a + 0.5 * 10**(-precision), precision)
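# Worked example (added for clarity) for the two rounding helpers above:
# round_up(3.14159, decimals=2)  -> ceil(314.159) / 100          = 3.15
# my_ceil(3.14159, precision=2)  -> round(3.14159 + 0.005, 2)    = 3.15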
# base.py
# Author: <NAME> <<EMAIL>>
"""
This file contains code that implements the core of the submodular selection
algorithms.
"""
import numpy
from tqdm import tqdm
from ..optimizers import BaseOptimizer
from ..optimizers import NaiveGreedy
from ..optimizers import LazyGreedy
from ..optimizers import ApproximateLazyGreedy
from ..optimizers import TwoStageGreedy
from ..optimizers import StochasticGreedy
from ..optimizers import BidirectionalGreedy
from ..optimizers import GreeDi
from ..optimizers import SieveGreedy
from ..optimizers import OPTIMIZERS
from ..utils import PriorityQueue
from ..utils import check_random_state
from ..utils import _calculate_pairwise_distances
from scipy.sparse import csr_matrix
class BaseSelection(object):
"""The base selection object.
This object defines the structures that all submodular selection algorithms
should follow. All algorithms will have the same public methods and the
same attributes.
Parameters
----------
n_samples : int
The number of samples to return.
initial_subset : list, numpy.ndarray or None, optional
If provided, this should be a list of indices into the data matrix
to use as the initial subset, or a group of examples that may not be
in the provided data should beused as the initial subset. If indices,
the provided array should be one-dimensional. If a group of examples,
the data should be 2 dimensional.
optimizer : string or optimizers.BaseOptimizer, optional
The optimization approach to use for the selection. The 'two-stage'
option makes selections using the naive greedy algorithm initially and
then switches to the lazy greedy algorithm. Must be
one of
'naive' : the naive greedy algorithm
'lazy' : the lazy (or accelerated) greedy algorithm
'approximate-lazy' : the approximate lazy greedy algorithm
'two-stage' : starts with naive and switches to lazy
'stochastic' : the stochastic greedy algorithm
'greedi' : the GreeDi distributed algorithm
'bidirectional' : the bidirectional greedy algorithm
Default is 'lazy'.
optimizer_kwds : dict or None
A dictionary of arguments to pass into the optimizer object. The keys
of this dictionary should be the names of the parameters in the optimizer
and the values in the dictionary should be the values that these
parameters take. Default is None.
reservoir : numpy.ndarray or None
The reservoir to use when calculating gains in the sieve greedy
streaming optimization algorithm in the `partial_fit` method.
Currently only used for graph-based functions. If a numpy array
is passed in, it will be used as the reservoir. If None is passed in,
will use reservoir sampling to collect a reservoir. Default is None.
max_reservoir_size : int
The maximum size that the reservoir can take. If a reservoir is passed
in, this value is set to the size of that array. Default is 1000.
n_jobs : int
The number of threads to use when performing computation in parallel.
Currently, this parameter is exposed but does not actually do anything.
This will be fixed soon.
random_state : int or RandomState or None, optional
The random seed to use for the random selection process. Only used
for stochastic greedy.
verbose : bool
Whether to print output during the selection process.
Attributes
----------
n_samples : int
The number of samples to select.
ranking : numpy.array int
The selected samples in the order of their gain with the first number in
the ranking corresponding to the index of the first sample that was
selected by the greedy procedure.
gains : numpy.array float
The gain of each sample in the returned set when it was added to the
growing subset. The first number corresponds to the gain of the first
added sample, the second corresponds to the gain of the second added
sample, and so forth.
"""
def __init__(self, n_samples, initial_subset=None, optimizer='lazy',
optimizer_kwds={}, reservoir=None, max_reservoir_size=1000,
n_jobs=1, random_state=None, verbose=False):
if n_samples <= 0:
raise ValueError("n_samples must be a positive value.")
if not isinstance(initial_subset, (list, numpy.ndarray)) and initial_subset is not None:
raise ValueError("initial_subset must be a list, numpy array, or None")
if isinstance(initial_subset, (list, numpy.ndarray)):
initial_subset = numpy.array(initial_subset)
if not isinstance(optimizer, BaseOptimizer):
if optimizer not in OPTIMIZERS.keys():
raise ValueError("Optimizer must be an optimizer object or " \
"a str in {}.".format(str(OPTIMIZERS.keys())))
if isinstance(optimizer, BaseOptimizer):
optimizer.function = self
if verbose not in (True, False):
raise ValueError("verbosity must be True or False")
self.n_samples = n_samples
self.metric = 'ignore'
self.random_state = check_random_state(random_state)
self.optimizer = optimizer
self.optimizer_kwds = optimizer_kwds
self.n_jobs = n_jobs
self.verbose = verbose
self.initial_subset = initial_subset
self.ranking = None
self.idxs = None
self.gains = None
self.subset = None
self.sparse = None
self._X = None
self.sieve_current_values_ = None
self.n_seen_ = 0
self.reservoir_size = 0 if reservoir is None else reservoir.shape[0]
self.reservoir = reservoir
self.max_reservoir_size = max_reservoir_size if reservoir is None else reservoir.shape[0]
self.update_reservoir_ = reservoir is None
def fit(self, X, y=None, sample_weight=None, sample_cost=None):
"""Run submodular optimization to select a subset of examples.
This method is a wrapper for the full submodular optimization process.
It takes in some data set (and optionally labels that are ignored
during this process) and selects `n_samples` from it in the greedy
manner specified by the optimizer.
This method will return the selector object itself, not the transformed
data set. The `transform` method will then transform a data set to the
selected points, or alternatively one can use the ranking stored in
the `self.ranking` attribute. The `fit_transform` method will perform
both optimization and selection and return the selected items.
Parameters
----------
X : list or numpy.ndarray, shape=(n, d)
The data set to transform. Must be numeric.
y : list or numpy.ndarray or None, shape=(n,), optional
The labels to transform. If passed in this function will return
both the data and the corresponding labels for the rows that have
been selected.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The weight of each example. Currently ignored in apricot but
included to maintain compatibility with sklearn pipelines.
sample_cost : list or numpy.ndarray or None, shape=(n,), optional
The cost of each item. If set, indicates that optimization should
be performed with respect to a knapsack constraint.
Returns
-------
self : BaseSelection
The fit step returns this selector object.
"""
allowed_dtypes = list, numpy.ndarray, csr_matrix
if not isinstance(X, allowed_dtypes):
raise ValueError("X must be either a list of lists, a 2D numpy " \
"array, or a scipy.sparse.csr_matrix.")
if isinstance(X, numpy.ndarray) and len(X.shape) != 2:
raise ValueError("X must have exactly two dimensions.")
if numpy.min(X) < 0.0 and numpy.max(X) > 0.:
raise ValueError("X cannot contain a mix of negative and positive values; " \
"it must be entirely non-negative or entirely non-positive.")
if self.n_samples > X.shape[0]:
raise ValueError("Cannot select more examples than the number in" \
" the data set.")
if not self.sparse:
if X.dtype != 'float64':
X = X.astype('float64')
if isinstance(self.optimizer, str):
optimizer = OPTIMIZERS[self.optimizer](function=self,
verbose=self.verbose, random_state=self.random_state,
**self.optimizer_kwds)
else:
optimizer = self.optimizer
self._X = X if self._X is None else self._X
self._initialize(X)
if self.verbose:
self.pbar = tqdm(total=self.n_samples, unit_scale=True)
optimizer.select(X, self.n_samples, sample_cost=sample_cost)
if self.verbose == True:
self.pbar.close()
self.ranking = numpy.array(self.ranking)
import numpy as np
import loupe
def test_sserror_forward():
data = np.ones((10,10))
model = loupe.ones((10,10))
err = loupe.sserror(model, data, mask=None, gain_bias_invariant=False)
assert err.data == 0
model = 0.9*loupe.ones((10,10))
err = loupe.sserror(model, data, mask=None, gain_bias_invariant=False)
assert np.allclose(err.data, 0.01)
def test_sserror_fit_gain():
model = loupe.rand(size=(10,10))
gain = np.random.uniform(low=-0.5, high=0.5)
data = model.data.copy() * gain
_model = loupe.sserror._G(model.data, mask=None)
_data = loupe.sserror._G(data, mask=None)
gain_est = loupe.sserror._fit_gain(_data, _model, mask=None)
assert np.allclose(gain, gain_est)
def test_sserror_fit_bias():
model = loupe.rand(size=(10,10))
bias = np.random.uniform(low=-5, high=5)
data = model.data.copy() + bias
bias_est = loupe.sserror._fit_bias(data, model.data, mask=None, gain=1)
assert np.allclose(bias, bias_est)
def test_sserror_fit_gain_bias():
model = loupe.rand(size=(10,10))
gain = np.random.uniform(low=-0.5, high=0.5)
bias = np.random.uniform(low=-5, high=5)
data = (model.data.copy() * gain) + bias
_model = loupe.sserror._G(model.data, mask=None)
_data = loupe.sserror._G(data, mask=None)
gain_est = loupe.sserror._fit_gain(_data, _model, mask=None)
bias_est = loupe.sserror._fit_bias(data, model.data, mask=None, gain=gain_est)
assert np.allclose(gain, gain_est)
assert np.allclose(bias, bias_est)
def test_sserror_forward_multi():
model = loupe.rand(size=(3,10,10))
gain = np.random.uniform(low=-0.5, high=0.5)
bias = np.random.uniform(low=-5, high=5)
data_clean = model.data.copy()
data_clean += np.random.uniform(low=-.1, high=.1, size=(3,10,10))
data = gain * data_clean + bias
err = loupe.sserror(model, data, mask=None, gain_bias_invariant=True)
assert err.data
def test_sserror_backward_multi():
model = loupe.rand(size=(3,10,10), requires_grad=True)
gain = np.random.uniform(low=-0.5, high=0.5)
bias = np.random.uniform(low=-5, high=5)
data_clean = model.data.copy()
data_clean += np.random.uniform(low=-.1, high=.1, size=(3,10,10))
data = gain * data_clean + bias
err = loupe.sserror(model, data, mask=None, gain_bias_invariant=True)
err.backward(grad=1.0)
assert np.all(model.grad != 0)
assert model.grad.shape == model.shape
def test_sserror_residual():
model = loupe.rand(size=(3,10,10))
gain = np.random.uniform(low=-0.5, high=0.5)
from unittest import TestCase
import numpy as np
from kmeans import iou, avg_iou, kmeans
class TestBasic(TestCase):
def test_iou_100(self):
self.assertEqual(iou([200, 200], np.array([[200, 200]])), 1.)
def test_iou_50(self):
self.assertEqual(iou([200, 200], np.array([[100, 200]])), .5)
self.assertEqual(iou([200, 200], np.array([[200, 100]])), .5)
def test_iou_75(self):
self.assertEqual(iou([200, 200], np.array([[150, 200]])), .75)
self.assertEqual(iou([200, 200], np.array([[200, 150]])), .75)
def test_iou_20(self):
self.assertEqual(iou([183, 73], np.array([[73, 36.6]])), .2)
self.assertEqual(iou([183, 73], np.array([[36.6, 73]])), .2)
def test_iou_multiple(self):
a = np.array([[200, 200], [100, 200], [200, 100], [150, 200], [200, 150]])
b = np.array([1., 0.5, 0.5, 0.75, 0.75])
self.assertTrue((iou([200, 200], a) == b).all())
def test_iou_0(self):
self.assertRaises(ValueError, iou, [100, 100], np.array([[0, 0]]))
self.assertRaises(ValueError, iou, [0, 0], np.array([[100, 100]]))
self.assertRaises(ValueError, iou, [0, 0], np.array([[0, 0]]))
self.assertRaises(ValueError, iou, [100, 0], np.array([[100, 100]]))
self.assertRaises(ValueError, iou, [0, 100], np.array([[100, 100]]))
from os.path import abspath, dirname
from os.path import join as pjoin
import h5py
import numpy as np
import pytest
from cfelpyutils.crystfel_utils import load_crystfel_geometry
from matplotlib.axes import Axes
from testpath import assert_isfile
from extra_geom import LPD_1MGeometry
from extra_geom.detectors import invert_xfel_lpd_geom
from .utils import assert_geom_close
tests_dir = dirname(abspath(__file__))
def test_write_read_crystfel_file(tmpdir):
geom = LPD_1MGeometry.from_quad_positions(
[(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
)
path = str(tmpdir / 'test.geom')
geom.write_crystfel_geom(filename=path)
with open(path, 'r') as f:
contents = f.read()
with open(path, 'w') as f:
f.write('clen = 0.119\n')
f.write('adu_per_eV = 0.0075\n')
f.write(contents)
# Load the geometry file with cfelpyutils and test the rigid groups
loaded = LPD_1MGeometry.from_crystfel_geom(path)
np.testing.assert_allclose(
loaded.modules[0][0].corner_pos, geom.modules[0][0].corner_pos
)
np.testing.assert_allclose(loaded.modules[0][0].fs_vec, geom.modules[0][0].fs_vec)
geom_dict = load_crystfel_geometry(path)
quad_gr0 = ['p0a0', 'p0a1', 'p0a2', 'p0a3', 'p0a4', 'p0a5', 'p0a6', 'p0a7',
'p0a8', 'p0a9', 'p0a10','p0a11', 'p0a12', 'p0a13', 'p0a14',
'p0a15', 'p1a0', 'p1a1','p1a2', 'p1a3','p1a4','p1a5','p1a6',
'p1a7', 'p1a8', 'p1a9', 'p1a10', 'p1a11', 'p1a12', 'p1a13',
'p1a14', 'p1a15', 'p2a0', 'p2a1', 'p2a2', 'p2a3', 'p2a4', 'p2a5',
'p2a6', 'p2a7', 'p2a8', 'p2a9', 'p2a10', 'p2a11', 'p2a12', 'p2a13',
'p2a14','p2a15', 'p3a0', 'p3a1','p3a2', 'p3a3', 'p3a4', 'p3a5',
'p3a6', 'p3a7', 'p3a8','p3a9', 'p3a10', 'p3a11', 'p3a12', 'p3a13',
'p3a14', 'p3a15']
assert geom_dict['rigid_groups']['p0'] == quad_gr0[:16]
assert geom_dict['rigid_groups']['p3'] == quad_gr0[-16:]
assert geom_dict['rigid_groups']['q0'] == quad_gr0
def test_read_write_xfel_file_quadpos(tmpdir):
quad_pos = [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
geom = LPD_1MGeometry.from_quad_positions(quad_pos)
path = str(tmpdir / 'lpd_geom.h5')
quad_pos_out = geom.to_h5_file_and_quad_positions(path)
np.testing.assert_allclose(quad_pos_out, quad_pos)
assert_isfile(path)
loaded = LPD_1MGeometry.from_h5_file_and_quad_positions(path, quad_pos_out)
assert_geom_close(loaded, geom)
def test_read_write_xfel_file(tmpdir):
quad_pos = [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
geom = LPD_1MGeometry.from_quad_positions(quad_pos)
path = str(tmpdir / 'lpd_geom.h5')
geom.to_h5_file_and_quad_positions(path)
assert_isfile(path)
loaded = LPD_1MGeometry.from_h5_file(path)
assert_geom_close(loaded, geom)
def test_quad_positions_with_file():
path = pjoin(tests_dir, 'lpd_mar_18.h5')
quad_pos = [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
geom = LPD_1MGeometry.from_h5_file_and_quad_positions(path, quad_pos)
np.testing.assert_allclose(geom.quad_positions(path), quad_pos)
def test_quad_positions_no_file():
quad_pos = [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
geom = LPD_1MGeometry.from_quad_positions(quad_pos)
np.testing.assert_allclose(geom.quad_positions(), quad_pos)
def test_offset():
quad_pos = [(11.4, 299), (-11.5, 8), (254.5, -16), (278.5, 275)]
geom = LPD_1MGeometry.from_quad_positions(quad_pos)
y_orig = np.array([m[0].corner_pos[1] for m in geom.modules])
# Uniform shift for all modules, all tiles
all_shifted = geom.offset((0, 1e-3))
y1 = np.array([m[0].corner_pos[1] for m in all_shifted.modules])
np.testing.assert_allclose(y1, y_orig + 1e-3)
# Select some modules
q4_shifted = geom.offset((0, 2e-3), modules=np.s_[12:])
y2 = np.array([m[0].corner_pos[1] for m in q4_shifted.modules])
np.testing.assert_allclose(y2[:12], y_orig[:12])
np.testing.assert_allclose(y2[12:], y_orig[12:] + 2e-3)
quad_pos_modified = q4_shifted.quad_positions()
np.testing.assert_allclose(quad_pos_modified[:3], quad_pos[:3])
np.testing.assert_allclose(
quad_pos_modified[3], np.array(quad_pos[3]) + [0, 2] # quad positions in mm
)
# Per-module shift
q3_shifted = geom.offset(np.repeat([
(0, 0), (0, 0), (0, 2e-3), (0, 0),
], repeats=4, axis=0))
y3 = np.array([m[0].corner_pos[1] for m in q3_shifted.modules])
np.testing.assert_allclose(y3[:8], y_orig[:8])
np.testing.assert_allclose(y3[8:12], y_orig[8:12] + 2e-3)
np.testing.assert_allclose(y3[12:], y_orig[12:])
# Per-tile shift
shift = np.zeros((4, 16, 3), dtype=np.float64)
shift[:, 5, 1] = 3e-3 # Shift T6 of each module in y dimension
q2_t2_shifted = geom.offset(shift, modules=np.s_[4:8])
y_t1 = np.array([m[0].corner_pos[1] for m in q2_t2_shifted.modules])
np.testing.assert_allclose(y_t1, y_orig)
y_t6 = np.array([m[5].corner_pos[1] for m in q2_t2_shifted.modules])
y_t6_orig = np.array([m[5].corner_pos[1] for m in geom.modules])
np.testing.assert_allclose(y_t6[:4], y_t6_orig[:4])
np.testing.assert_allclose(y_t6[4:8], y_t6_orig[4:8] + 3e-3)
np.testing.assert_allclose(y_t6[8:], y_t6_orig[8:])
import logging
from pathlib import Path
from typing import Union, List
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from collections import defaultdict
import scipy.stats as scst
from HPOBenchExperimentUtils import _log as _main_log
from HPOBenchExperimentUtils.utils.validation_utils import load_json_files, \
load_trajectories_as_df, df_per_optimizer
from HPOBenchExperimentUtils.utils.plotting_utils import plot_dc, color_per_opt, marker_per_opt,\
unify_layout
from HPOBenchExperimentUtils.utils.runner_utils import get_optimizer_setting, get_benchmark_settings
_main_log.setLevel(logging.DEBUG)
_log = logging.getLogger(__name__)
def plot_fidels(benchmark: str, output_dir: Union[Path, str], input_dir: Union[Path, str], opts: str,
opt_list: Union[List[str], None]=None, **kwargs):
_log.info(f'Plotting evaluated fidelities of benchmark {benchmark}')
input_dir = Path(input_dir) / benchmark
assert input_dir.is_dir(), f'Result folder doesn\'t exist: {input_dir}'
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
opt_rh_dc = load_trajectories_as_df(input_dir=input_dir,
which="runhistory")
if opt_list is None:
opt_list = list(opt_rh_dc.keys())
other_stats_dc = dict()
best_val = 1000
other_stats_dc["lowest"] = best_val
plt.figure(figsize=[5*len(opt_rh_dc), 5])
ax_old = None
for i, opt in enumerate(opt_rh_dc):
_log.info(f'Handling {opt}')
if opt not in opt_list:
_log.info(f'Skip {opt}')
continue
if len(opt_rh_dc[opt]) == 0: continue
other_stats_dc[opt] = defaultdict(list)
rhs = load_json_files(opt_rh_dc[opt])
for rh in rhs:
final_time = rh[-1]["finish_time"] - rh[0]["boot_time"]
bench_time = rh[-1]["total_time_used"]
calls = rh[-1]["function_call"]
other_stats_dc[opt]["final_time"].append(final_time)
other_stats_dc[opt]["bench_time"].append(bench_time)
other_stats_dc[opt]["calls"].append(calls)
df = df_per_optimizer(opt, rhs)
ax = plt.subplot(1, len(opt_rh_dc), i+1, sharey=ax_old)
thresh = 10000
if df.shape[0] > thresh:
sub = df[["fidel_values", "total_time_used"]].sample(n=thresh, random_state=1)
else:
sub = df[["fidel_values", "total_time_used"]]
avg = sub.shape[0] / len(df['id'].unique())
max_f = np.max(sub["fidel_values"])
vals = np.min(df.query('fidel_values == @max_f')["function_values"])
best_val = np.min([best_val, vals])
other_stats_dc["lowest"] = best_val
label = get_optimizer_setting(opt).get("display_name", opt)
plt.scatter(sub["total_time_used"], sub["fidel_values"], edgecolor=color_per_opt.get(opt, "k"), facecolor="none",
marker=marker_per_opt.get(opt, "o"), alpha=0.5,
label=label)
plt.xscale("log")
if get_benchmark_settings(benchmark)["is_surrogate"]:
plt.xlabel("Simulated runtime in seconds")
else:
plt.xlabel("Runtime in seconds")
unify_layout(ax, legend_args={"title": "%g evals on avg" % avg})
ax_old = ax
del rhs, df, sub
with open(Path(output_dir) / f'stats1_{benchmark}_{opts}.json', "w") as fh:
json.dump(other_stats_dc, fh, indent=4, sort_keys=True)
plt.ylabel("Fidelity")
plt.tight_layout()
plt.savefig(Path(output_dir) / f'fidel_{benchmark}_{opts}.png')
plt.close('all')
def plot_overhead(benchmark: str, output_dir: Union[Path, str], input_dir: Union[Path, str], opts: str,
opt_list: Union[List[str], None]=None, **kwargs):
_log.info(f'Start plotting overhead of benchmark {benchmark}')
input_dir = Path(input_dir) / benchmark
assert input_dir.is_dir(), f'Result folder doesn\'t exist: {input_dir}'
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
opt_rh_dc = load_trajectories_as_df(input_dir=input_dir,
which="runhistory")
if opt_list is None:
opt_list = list(opt_rh_dc.keys())
plt.figure(figsize=[5, 5])
a = plt.subplot(111)
for opt in opt_rh_dc:
_log.info(f'Handling {opt}')
if opt not in opt_list:
_log.info(f'Skip {opt}')
continue
if len(opt_rh_dc[opt]) == 0: continue
rhs = load_json_files(opt_rh_dc[opt])
df = df_per_optimizer(opt, rhs)
nseeds = df['id'].unique()
for seed in nseeds:
steps = df[df['id'] == seed]["total_time_used"]
label = get_optimizer_setting(opt).get("display_name", opt)
benchmark_cost = df[df['id'] == seed]["finish_time"] - df[df['id'] == seed]["start_time"]
benchmark_cost = np.cumsum(benchmark_cost)
plt.plot(steps, benchmark_cost, color='k', alpha=0.5, zorder=99,
label=benchmark if seed == 0 and opt == list(opt_rh_dc.keys())[0] else None)
overhead = df[df['id'] == seed]["start_time"] - df[df['id'] == seed]["finish_time"].shift(1)
overhead = np.cumsum(overhead)
plt.plot(steps, overhead, color=color_per_opt.get(opt, "k"), linestyle=":", label=label if seed == 0 else None)
overall_cost = df[df['id'] == seed]["finish_time"] - df[df['id'] == seed]["start_time"].iloc[0]
benchmark_cost = np.cumsum(overall_cost)
plt.plot(steps, overall_cost, color=color_per_opt.get(opt, "k"), alpha=0.5, zorder=99,
label="%s overall" % label if seed == 0 else None)
a.set_yscale("log")
a.set_xscale("log")
if get_benchmark_settings(benchmark)["is_surrogate"]:
a.set_xlabel("Simulated runtime in seconds")
else:
a.set_xlabel("Runtime in seconds")
a.set_ylabel("Cumulated overhead in seconds")
a.set_xlim([1, a.set_xlim()[1]])
unify_layout(a)
plt.tight_layout()
plt.savefig(Path(output_dir) / f'overhead_{benchmark}_{opts}.png')
plt.close('all')
def plot_ecdf(benchmark: str, output_dir: Union[Path, str], input_dir: Union[Path, str], opts: str,
opt_list: Union[List[str], None] = None, **kwargs):
_log.info(f'Start plotting ECDFs for benchmark {benchmark}')
input_dir = Path(input_dir) / benchmark
Path(output_dir).mkdir(exist_ok=True, parents=True)
assert input_dir.is_dir(), f'Result folder doesn\"t exist: {input_dir}'
opt_rh_dc = load_trajectories_as_df(input_dir=input_dir,
which="runhistory")
benchmark_spec = plot_dc.get(benchmark, {})
y_best = benchmark_spec.get("ystar_valid", 0)
def ecdf(x):
xs = np.sort(x)
ys = np.arange(1, len(xs) + 1) / float(len(xs))
return xs, ys
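# Worked example (added for clarity): ecdf(np.array([3., 1., 2.])) returns
# xs = [1., 2., 3.] and ys = [1/3, 2/3, 1.], i.e. the empirical CDF evaluated
# at the sorted sample points.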
plt.figure(figsize=[5, 5])
a = plt.subplot(111)
if opt_list is None:
opt_list = list(opt_rh_dc.keys())
for opt in opt_rh_dc:
if opt not in opt_list:
_log.info(f'Skip {opt}')
continue
if len(opt_rh_dc[opt]) == 0: continue
rhs = load_json_files(opt_rh_dc[opt])
df = df_per_optimizer(opt, rhs, y_best=y_best)
color = color_per_opt.get(opt, "k")
obj_vals = df["function_values"]
x, y = ecdf(obj_vals.to_numpy())
label = get_optimizer_setting(opt).get("display_name", opt)
plt.plot(x, y, c=color, linewidth=2, label=label)
nseeds = df['id'].unique()
for seed in nseeds:
obj_vals = df[df['id'] == seed]["function_values"]
x, y = ecdf(obj_vals.to_numpy())
plt.plot(x, y, c=color, alpha=0.2)
if y_best != 0:
plt.xlabel("Optimization Regret")
else:
plt.xlabel("Optimization objective value")
plt.ylabel("P(x < X)")
yscale = benchmark_spec.get("yscale", "log")
plt.xscale(yscale)
unify_layout(a)
plt.tight_layout()
plt.grid(b=True, which="both", axis="both", alpha=0.5)
plt.savefig(Path(output_dir) / f'ecdf_{benchmark}_{opts}.png')
def plot_correlation(benchmark: str, output_dir: Union[Path, str], input_dir: Union[Path, str], opts: str,
opt_list: Union[List[str], None] = None, **kwargs):
_log.info(f'Start plotting correlations for benchmark {benchmark}')
input_dir = Path(input_dir) / benchmark
Path(output_dir).mkdir(exist_ok=True, parents=True)
assert input_dir.is_dir(), f'Result folder doesn\'t exist: {input_dir}'
opt_rh_dc = load_trajectories_as_df(input_dir=input_dir,
which="runhistory")
benchmark_spec = plot_dc.get(benchmark, {})
conf_dc = defaultdict(dict)
f_set = []
if opt_list is None:
opt_list = list(opt_rh_dc.keys())
for opt in opt_rh_dc:
if not ("smac_hb" in opt
or "dehb" in opt
or "hpbandster_bohb" in opt
or "hpbandster_hb" in opt
):
_log.info(f'Neither smac, dehb nor hpband: {opt}')
continue
if opt not in opt_list:
_log.info("Skip %s" % opt)
_log.info("Read %s" % opt)
if len(opt_rh_dc[opt]) == 0: continue
rhs = load_json_files(opt_rh_dc[opt])
for rh in rhs:
for record in rh[1:]:
c = json.dumps(record["configuration"], sort_keys=True)
f = record['fidelity'][list(record['fidelity'])[0]]
f_set.append(f)
conf_dc[c][f] = record["function_value"]
f_set = np.array(list(set(f_set)))
f_set.sort()
# Clean dc:
to_rm = []
for c in conf_dc:
if len(conf_dc[c]) < 2:
to_rm.append(c)
for c in to_rm:
del conf_dc[c]
# Start with computing correlations
cors = {}
for fi, f1 in enumerate(f_set):
cors[(f1, f1)] = (1, 0)
for f2 in f_set[fi+1:]:
a = []
b = []
for c in conf_dc:
if f1 in conf_dc[c] and f2 in conf_dc[c]:
a.append(conf_dc[c][f1])
b.append(conf_dc[c][f2])
c, _ = scst.spearmanr(a, b)
cors[(f1, f2)] = (c, len(a))
# Create plot
styles = [
('#99000d', "o", 2, 10, "-"),
('#cb181d', "^", 2, 10, "-"),
('#ef3b2c', "s", 2, 10, "-"),
('#fb6a4a', "*", 2, 10, "-"),
('#fc9272', "v", 2, 10, "-"),
('#fcbba1', "p", 2, 10, "-"),
('#fee5d9', "d", 2, 10, "-"),
]
plt.figure(figsize=[5, 5])
a = plt.subplot(111)
for fi, f in enumerate(f_set):
if len(f_set[fi:]) == 0: continue
c, m, lw, ms, ls = styles[fi]
a.plot(f_set[fi:], [cors[(f, f1)][0] for f1 in f_set[fi:]], label=f,
marker=m, linewidth=lw, linestyle=ls, markersize=ms, c=c)
#a.annotate("%d" % f, [f, 1.01], fontsize=15)
a.set_xlabel("Fidelity value")
a.set_ylabel("Spearman correlation coefficient")
a.set_ylim(benchmark_spec.get("cylim", [-1, 1]))
a.set_xscale("log")
unify_layout(a, legend_args={"title": "Fidelity value"})
plt.tight_layout()
plt.savefig(Path(output_dir) / f'correlation_{benchmark}.png')
# Create table
df = defaultdict(list)
for fi, f1 in enumerate(f_set[:-1]):
for fj, f2 in enumerate(f_set):
if fj < fi:
df[f1].append("-")
else:
df[f1].append("%.3g (%d)" % ( | np.round(cors[f1, f2][0], 3) | numpy.round |
import numpy as np
from time import time
from scipy.spatial.distance import pdist, cdist
from copy import deepcopy
import random
"""
Scripts to compute LJ energy and force
"""
def LJ(pos):
"""
Calculate the total energy
"""
distance = pdist(pos)
r6 = np.power(distance, 6)
r12 = np.multiply(r6, r6)
Eng = np.sum(4 * (1 / r12 - 1 / r6))
return Eng
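# Illustrative usage sketch (added for clarity, not part of the original
# script): two atoms separated by the LJ minimum distance r = 2**(1/6) have a
# total energy of -1 in reduced units.
def _demo_lj_dimer():
    pos = np.array([[0.0, 0.0, 0.0], [2.0 ** (1.0 / 6.0), 0.0, 0.0]])
    return LJ(pos)                        # approximately -1.0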
def LJ_force(pos):
N_atom = len(pos)
force = np.zeros([N_atom, 3])
for i, pos0 in enumerate(pos):
pos1 = deepcopy(pos)
pos1 = np.delete(pos1, i, 0)
distance = cdist([pos0], pos1)
r = pos1 - pos0
r2 = np.power(distance, 2)
#!/usr/bin/env python
from __future__ import print_function, division
import math
import glob
import aplpy
import numpy as np
import itertools
import multiprocessing as mp
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import wcs
import astropy.units as u
from astropy.stats import sigma_clipped_stats
from astropy.nddata.utils import Cutout2D
from astropy.io import fits
from astropy.visualization import ZScaleInterval
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
from scipy.optimize import curve_fit
from photutils import aperture_photometry, CircularAperture
from photutils import Background2D, MedianBackground, make_source_mask
from qso_toolbox import utils as ut
from qso_toolbox import catalog_tools as ct
from qso_toolbox import photometry_tools as pt
def show_rectangles(fig, xw, yw, width, height, angle=0, layer=False,
zorder=None, coords_frame='world', **kwargs):
"""
Overlay rectangles on the current plot.
ATTENTION! THIS IS A MODIFIED VERSION OF THE ORIGINAL APLPY ROUTINE THAT
CORRECTLY ROTATES THE RECTANGLE AROUND ITS CENTER POSITION.
see https://github.com/aplpy/aplpy/pull/327
Parameters
----------
xw : list or `~numpy.ndarray`
The x positions of the centers of the rectangles (in world coordinates)
yw : list or `~numpy.ndarray`
The y positions of the centers of the rectangles (in world coordinates)
width : int or float or list or `~numpy.ndarray`
The width of the rectangle (in world coordinates)
height : int or float or list or `~numpy.ndarray`
The height of the rectangle (in world coordinates)
angle : int or float or list or `~numpy.ndarray`, optional
rotation in degrees (anti-clockwise). Default
angle is 0.0.
layer : str, optional
The name of the rectangle layer. This is useful for giving
custom names to layers (instead of rectangle_set_n) and for
replacing existing layers.
coords_frame : 'pixel' or 'world'
The reference frame in which the coordinates are defined. This is
used to interpret the values of ``xw``, ``yw``, ``width``, and
``height``.
kwargs
Additional keyword arguments (such as facecolor, edgecolor, alpha,
or linewidth) are passed to Matplotlib
:class:`~matplotlib.collections.PatchCollection` class, and can be
used to control the appearance of the rectangles.
"""
xw, yw, width, height, angle = aplpy.core.uniformize_1d(xw, yw, width,
height, angle)
if 'facecolor' not in kwargs:
kwargs.setdefault('facecolor', 'none')
if layer:
fig.remove_layer(layer, raise_exception=False)
if coords_frame not in ['pixel', 'world']:
raise ValueError("coords_frame should be set to 'pixel' or 'world'")
# While we could plot the shape using the get_transform('world') mode
# from WCSAxes, the issue is that the rotation angle is also measured in
# world coordinates so will not be what the user is expecting. So we
# allow the user to specify the reference frame for the coordinates and
# for the rotation.
if coords_frame == 'pixel':
x, y = xw, yw
w = width
h = height
a = angle
transform = fig.ax.transData
else:
x, y = fig.world2pixel(xw, yw)
pix_scale = aplpy.core.proj_plane_pixel_scales(fig._wcs)
sx, sy = pix_scale[fig.x], pix_scale[fig.y]
w = width / sx
h = height / sy
a = angle
transform = fig.ax.transData
# x = x - w / 2.
# y = y - h / 2.
#
# patches = []
# for i in range(len(x)):
# patches.append(Rectangle((x[i], y[i]), width=w[i], height=h[i],
# angle=a[i]))
xp = x - w / 2.
yp = y - h / 2.
radeg = np.pi / 180
xr = (xp - x) * np.cos((angle) * radeg) - (yp - y) * np.sin(
(angle) * radeg) + x
yr = (xp - x) * np.sin((angle) * radeg) + (yp - y) * np.cos(
(angle) * radeg) + y
patches = []
for i in range(len(xr)):
patches.append(
Rectangle((xr[i], yr[i]), width=w[i], height=h[i], angle=a[i]))
# Due to bugs in matplotlib, we need to pass the patch properties
# directly to the PatchCollection rather than use match_original.
p = PatchCollection(patches, transform=transform, **kwargs)
if zorder is not None:
p.zorder = zorder
c = fig.ax.add_collection(p)
if layer:
rectangle_set_name = layer
else:
fig._rectangle_counter += 1
rectangle_set_name = 'rectangle_set_' + str(fig._rectangle_counter)
fig._layers[rectangle_set_name] = c
return fig
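# Illustrative usage sketch (added for clarity; assumes an existing
# aplpy.FITSFigure and world coordinates in degrees, and the file name below
# is made up):
# fig = aplpy.FITSFigure('cutout.fits')
# fig.show_grayscale()
# show_rectangles(fig, xw=[150.10], yw=[2.20], width=[0.01], height=[0.005],
#                 angle=[30.], edgecolor='red', linewidth=1.5)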
# ------------------------------------------------------------------------------
# Plotting functions for image_cutouts
# ------------------------------------------------------------------------------
def open_image(filename, ra, dec, fov, image_folder_path, verbosity=0):
"""Opens an image defined by the filename with a fov of at least the
specified size (in arcseonds).
:param filename:
:param ra:
:param dec:
:param fov:
:param image_folder_path:
:param verbosity:
:return:
"""
filenames_available = glob.glob(filename)
file_found = False
open_file_fov = None
file_path = None
if len(filenames_available) > 0:
for filename in filenames_available:
try:
file_fov = int(filename.split("_")[3].split(".")[0][3:])
except:
file_fov = 9999999
if fov <= file_fov:
data, hdr = fits.getdata(filename, header=True)
file_found = True
file_path =filename
open_file_fov = file_fov
if file_found:
if verbosity > 0:
print("Opened {} with a fov of {} "
"arcseconds".format(file_path, open_file_fov))
return data, hdr, file_path
else:
if verbosity > 0:
print("File {} in folder {} not found. Target with RA {}"
" and Decl {}".format(filename, image_folder_path,
ra, dec))
return None, None, None
def make_mult_png_fig(ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None, n_col=3,
n_sigma=3, color_map_name='viridis',
add_info_label=None, add_info_value=None, verbosity=0):
"""Create a figure to plot cutouts for one source in all specified surveys
and bands.
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
:param fovs: list of floats
Field of view in arcseconds of image cutouts, length has be equal to
surveys, bands and apertures.
:param apertures: list of floats
List of apertures in arcseconds for forced photometry calculated,
length has to be equal to surveys, bands and fovs
:param square_sizes: list of floats
List of
:param image_folder_path: string
Path to the directory where all the images are be stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_col: int
Number of columns
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param add_info_value : string
Value for additional information added to the title of the figure
:param add_info_label : string
Label for additional information added to the title of the figure
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
n_images = len(surveys)
n_row = int(math.ceil(n_images / n_col))
fig = plt.figure(figsize=(5*n_col, 5*n_row))
fig = _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list,
magerr_list, sn_list,
forced_mag_list, forced_magerr_list,
forced_sn_list, n_sigma, color_map_name, verbosity)
coord_name = ut.coord_to_name(np.array([ra]),
np.array([dec]),
epoch="J")
if add_info_label is None or add_info_value is None:
fig.suptitle(coord_name[0])
else:
fig.suptitle(coord_name[0]+' '+add_info_label+'='+add_info_value)
return fig
def _make_mult_png_axes(fig, n_row, n_col, ra, dec, surveys, bands,
fovs, apertures, square_sizes, image_folder_path, mag_list=None,
magerr_list=None, sn_list=None,
forced_mag_list=None, forced_magerr_list=None,
forced_sn_list=None,
n_sigma=3, color_map_name='viridis', verbosity=0):
""" Create axes components to plot one source in all specified surveys
and bands.
:param fig: matplotlib.figure
Figure
:param n_row: int
Number of rows
:param n_col: int
Number of columns
:param ra: float
Right Ascension of the target
:param dec: float
Declination of the target
:param surveys: list of strings
List of survey names, length has to be equal to bands and fovs
:param bands: list of strings
List of band names, length has to be equal to surveys and fovs
    :param fovs: list of floats
        Field of view in arcseconds of image cutouts, length has to be equal
        to surveys, bands and apertures.
    :param apertures: list of floats
        List of apertures in arcseconds for which forced photometry was
        calculated, length has to be equal to surveys, bands and fovs
    :param square_sizes: list of floats
        List of square sizes in arcseconds for the error regions plotted
        around each target, length has to be equal to surveys, bands and fovs
    :param image_folder_path: string
        Path to the directory where all the images are stored
:param mag_list: list of floats
List of magnitudes for each survey/band
:param magerr_list: list of floats
List of magnitude errors for each survey/band
:param sn_list: list of floats
List of S/N for each survey/band
:param forced_mag_list: list of floats
List of forced magnitudes for each survey/band
:param forced_magerr_list: list of floats
List of forced magnitude errors for each survey/band
:param forced_sn_list: list of floats
List of forced S/N for each survey/band
:param n_sigma: int
Number of sigmas for the sigma-clipping routine that creates the
boundaries for the color map.
:param color_map_name: string
Name of the color map
:param verbosity:
Verbosity > 0 will print verbose statements during the execution
:return: matplotlib.figure
Figure with the plot.
"""
for idx, survey in enumerate(surveys):
band = bands[idx]
fov = fovs[idx]
aperture = apertures[idx]
size = square_sizes[idx]
if mag_list is not None:
catmag = mag_list[idx]
else:
catmag = None
if magerr_list is not None:
caterr = magerr_list[idx]
else:
caterr = None
if sn_list is not None:
catsn = sn_list[idx]
else:
catsn = None
if forced_mag_list is not None:
forced_mag = forced_mag_list[idx]
else:
forced_mag = None
if forced_magerr_list is not None:
forced_magerr = forced_magerr_list[idx]
else:
forced_magerr = None
if forced_sn_list is not None:
forced_sn = forced_sn_list[idx]
else:
forced_sn = None
# Get the correct filename, accept larger fovs
coord_name = ut.coord_to_name(np.array([ra]), np.array([dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*fov*.fits"
data, hdr, file_path = open_image(filename, ra, dec, fov,
image_folder_path,
verbosity)
if data is not None and hdr is not None:
file_found = True
else:
file_found = False
# Old plotting routine to modify, currently it only plots images for
# surveys and bands that it could open, no auto download implemented
if file_found:
wcs_img = wcs.WCS(hdr)
pixcrd = wcs_img.wcs_world2pix(ra, dec, 0)
            positions = (float(pixcrd[0]), float(pixcrd[1]))
overlap = True
if verbosity >= 4:
print("[DIAGNOSTIC] Image file shape {}".format(data.shape))
try:
img_stamp = Cutout2D(data, positions, size=fov * u.arcsec,
wcs=wcs_img)
if verbosity >= 4:
print("[DIAGNOSTIC] Cutout2D file shape {}".format(
img_stamp.shape))
except:
print("Source not in image")
overlap = False
img_stamp = None
if img_stamp is not None:
if overlap:
img_stamp = img_stamp.data
hdu = fits.ImageHDU(data=img_stamp, header=hdr)
axs = aplpy.FITSFigure(hdu, figure=fig,
subplot=(n_row, n_col, idx + 1),
north=True)
# Check if input color map name is a color map, else use viridis
try:
cm = plt.get_cmap(color_map_name)
except ValueError:
print('Color map argument is not a color map. Setting '
'default: viridis')
cm = plt.get_cmap('viridis')
color_map_name = 'viridis'
# Sigma-clipping of the color scale
mean = np.mean(img_stamp[~np.isnan(img_stamp)])
std = np.std(img_stamp[~np.isnan(img_stamp)])
upp_lim = mean + n_sigma * std
low_lim = mean - n_sigma * std
axs.show_colorscale(vmin=low_lim, vmax=upp_lim,
cmap=color_map_name)
# Plot circular aperture (forced photometry flux)
(yy, xx) = img_stamp.shape
circx = (xx * 0.5) # + 1
circy = (yy * 0.5) # + 1
aper_pix = aperture_inpixels(aperture, hdr)
circle = plt.Circle((circx, circy), aper_pix, color='r', fill=False,
lw=1.5)
fig.gca().add_artist(circle)
# Plot rectangular aperture (error region)
rect_inpixels = aperture_inpixels(size, hdr)
square = plt.Rectangle((circx - rect_inpixels * 0.5,
circy - rect_inpixels * 0.5),
rect_inpixels, rect_inpixels,
color='r', fill=False, lw=1.5)
fig.gca().add_artist(square)
# Create forced photometry label
if (forced_mag is not None):
if (forced_sn is not None) & (forced_magerr is not None):
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.1f})$'.format(band + "_{forced}",
forced_mag,
forced_magerr,
forced_sn)
elif forced_magerr is not None:
forcedlabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{forced}", forced_mag, forced_magerr)
else:
forcedlabel = r'${0:s} = {1:.2f}$'.format(
band + "_{forced}", forced_mag)
fig.gca().text(0.03, 0.16, forcedlabel, color='black',
weight='bold', fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
# Create catalog magnitude label
if catmag is not None:
if (catsn is not None) & (caterr is not None):
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f} (SN=' \
r'{3:.2f})$'.format(
band + "_{cat}", catmag, caterr, catsn)
elif caterr is not None:
maglabel = r'${0:s} = {1:.2f} \pm {2:.2f}$'.format(
band + "_{cat}", catmag, caterr)
else:
maglabel = r'${0:s} = {1:.2f}$'.format(
band + "_{cat}", catmag)
fig.gca().text(0.03, 0.04, maglabel, color='black',
weight='bold',
fontsize='large',
bbox=dict(facecolor='white', alpha=0.6),
transform=fig.gca().transAxes)
fig.gca().set_title(survey + " " + band)
return fig
# ------------------------------------------------------------------------------
# Finding Chart plotting routine
# ------------------------------------------------------------------------------
def make_finding_charts(table, ra_column_name, dec_column_name,
target_column_name, survey, band,
aperture, fov, image_folder_path,
offset_table=None,
offset_id = 0,
offset_focus = False,
offset_ra_column_name=None,
offset_dec_column_name=None,
pos_angle_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
# offset_finding_chart=True,
label_position='bottom',
slit_width=None,
slit_length=None,
format ='pdf',
auto_download=False, verbosity=0):
"""Create and save finding charts plots for all targets in the input table.
:param table: pandas.core.frame.DataFrame
Dataframe with targets to plot finding charts for
:param ra_column_name: string
Right ascension column name
:param dec_column_name: string
Declination column name
:param target_column_name: string
Name of the target identifier column
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Aperture to plot in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_table: pandas.core.frame.DataFrame
Pandas dataframe with offset star information for all targets
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param pos_angle_column_name: string
Offset star dataframe position angle column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param format: string
        A string indicating in which format the finding charts are saved.
Possible formats: 'pdf', 'png'
:param auto_download: boolean
Boolean to indicate whether images should be automatically downloaded.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
"""
surveys = [survey]
bands = [band]
fovs = [fov]
print(offset_table)
print(table)
for idx in table.index:
ra = table.loc[idx, ra_column_name]
dec = table.loc[idx, dec_column_name]
target_name = table.loc[idx, target_column_name]
if offset_table is not None:
offset_target = offset_table.query('target_name=="{}"'.format(
target_name))
# Set position angle
if len(offset_target) > 0:
if pos_angle_column_name is not None:
position_angle = offset_target.loc[offset_target.index[0],
pos_angle_column_name]
else:
target_coords = SkyCoord(ra=ra, dec=dec,
unit=(u.deg, u.deg),
frame='icrs')
offset_coords = SkyCoord(ra=offset_target.loc[:,
offset_ra_column_name].values,
dec=offset_target.loc[:,
offset_dec_column_name].values,
unit=(u.deg, u.deg),
frame='icrs')
# Calculate position angles(East of North)
pos_angles = offset_coords.position_angle(target_coords).to(
u.deg)
# Take position angle to offset_id star in list
position_angle = pos_angles[offset_id].to(u.deg).value
else:
position_angle = 0
offset_target = None
else:
offset_target = None
position_angle = 0
if offset_target is not None:
offset_target.reset_index(inplace=True, drop=True)
if auto_download:
if offset_focus:
ct.get_photometry(offset_target.loc[[0]],
offset_ra_column_name,
offset_dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
else:
ct.get_photometry(table.loc[[idx]],
ra_column_name,
dec_column_name,
surveys,
bands,
image_folder_path,
fovs,
# n_jobs=1,
verbosity=verbosity)
fig = make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=offset_target,
offset_id=offset_id,
offset_focus=offset_focus,
offset_ra_column_name=offset_ra_column_name,
offset_dec_column_name=offset_dec_column_name,
offset_mag_column_name=offset_mag_column_name,
offset_id_column_name=offset_id_column_name,
label_position=label_position,
slit_width=slit_width,
slit_length=slit_length,
position_angle=position_angle,
verbosity=verbosity)
if format == 'pdf':
fig.save('fc_{}.pdf'.format(target_name), transparent=False)
if format == 'png':
fig.save('fc_{}.png'.format(target_name), transparent=False)
print('{} created'.format('fc_{}'.format(target_name)))
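# Minimal usage sketch for make_finding_charts (column names, survey and band
# are illustrative assumptions):
#
#     import pandas as pd
#     targets = pd.DataFrame({'target_name': ['J010013.02+280225.8'],
#                             'ra': [15.0542], 'dec': [28.0405]})
#     make_finding_charts(targets, 'ra', 'dec', 'target_name',
#                         survey='desdr1', band='z',
#                         aperture=2.0, fov=150,
#                         image_folder_path='./cutouts',
#                         format='png', auto_download=False, verbosity=1)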
def make_finding_chart(ra, dec, survey, band, aperture, fov,
image_folder_path,
offset_df=None,
offset_id=0,
offset_focus=False,
offset_ra_column_name=None,
offset_dec_column_name=None,
offset_mag_column_name=None,
offset_id_column_name=None,
label_position='bottom',
slit_width=None, slit_length=None,
position_angle=None, verbosity=0):
"""Make the finding chart figure and return it.
This is an internal function, but can be used to create one finding chart.
:param ra: float
Right ascension of the target in decimal degrees
:param dec: float
Declination of the target in decimal degrees
:param survey: string
Survey name
:param band: string
Passband name
:param aperture: float
Size of the plotted aperture in arcseconds
:param fov: float
Field of view in arcseconds
:param image_folder_path: string
Path to where the image will be stored
:param offset_df: pandas.core.frame.DataFrame
Pandas dataframe with offset star information
:param offset_id: int
Integer indicating the primary offset from the offset table
:param offset_focus: boolean
Boolean to indicate whether offset star will be in the center or not
:param offset_ra_column_name: string
Offset star dataframe right ascension column name
:param offset_dec_column_name: string
Offset star dataframe declination column name
:param offset_mag_column_name: string
Offset star dataframe magnitude column name
:param offset_id_column_name: string
Offset star dataframe identifier column name
:param label_position: string
String that defines the label position for the offset stars.
Possible label positions are ["left", "right", "top", "bottom",
"topleft"]
:param slit_width: float
Slit width in arcseconds.
:param slit_length: float
Slit length in arcseconds
:param position_angle:
Position angle for the observation.
:param verbosity:
Verbosity > 0 will print verbose statements during the execution.
:return: matplotlib.figure
Return the matplotlib figure of the finding chart.
"""
if offset_focus:
im_ra = offset_df.loc[offset_id, offset_ra_column_name]
im_dec = offset_df.loc[offset_id, offset_dec_column_name]
else:
im_ra = ra
im_dec = dec
coord_name = ut.coord_to_name(np.array([im_ra]), np.array([im_dec]),
epoch="J")
filename = image_folder_path + '/' + coord_name[0] + "_" + survey + "_" + \
band + "*.fits"
data, hdr, file_path = open_image(filename, im_ra, im_dec,
fov,
image_folder_path,
verbosity=verbosity)
# Reproject data if position angle is specified
if position_angle != 0:
hdr['CRPIX1'] = int(hdr['NAXIS1'] / 2.)
hdr['CRPIX2'] = int(hdr['NAXIS2'] / 2.)
hdr['CRVAL1'] = im_ra
hdr['CRVAL2'] = im_dec
new_hdr = hdr.copy()
pa_rad = np.deg2rad(position_angle)
# TODO: Note that the rotation definition here reflects one axis
# TODO: to make sure that it is a rotated version of north up east left
# TODO: both 001 components have a negative sign!
new_hdr['PC001001'] = -np.cos(pa_rad)
        new_hdr['PC001002'] = -np.sin(pa_rad)
import time
import numpy as np
from sklearn.datasets import make_classification
from logreg import logreg_path
from sklearn.datasets.mldata import fetch_mldata
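# NOTE: sklearn.datasets.mldata.fetch_mldata was deprecated in scikit-learn
# 0.20 and removed in 0.22; sklearn.datasets.fetch_openml is the modern
# replacement for downloading the "leukemia" dataset used below.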
from sklearn.datasets import load_svmlight_file
# import pandas as pd
from blitz_path import blitz_path
import matplotlib.pyplot as plt
from matplotlib import rc
plt.close('all')
plt.style.use('ggplot')
fontsize = 13
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Computer Modern Roman']})
params = {'axes.labelsize': 20,
'font.size': 15,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True,
'text.latex.preamble': r'\usepackage{amsmath}'}
plt.rcParams.update(params)
dataset_id = 2
bench_time = 1
bench_active_set = 0
display_time = 0
if dataset_id == 1:
dataset = "synthetic"
X, y = make_classification(n_samples=50,
n_features=3000,
n_classes=2,
random_state=42)
X = X.astype(float)
X /= np.sqrt(np.sum(X ** 2, axis=0))
mask = np.sum(np.isnan(X), axis=0) == 0
if np.any(mask):
X = X[:, mask]
y = y.astype(float)
y_blitz = 2 * y - 1 # blitz's label = +-1
eps = 1e-3 # the smaller it is the longer is the path
elif dataset_id == 2:
dataset = "leukemia"
data = fetch_mldata(dataset)
    X = data.data
y = data.target
X = X.astype(float)
y = y.astype(float)
y_blitz = y.copy() # blitz's label = +-1
y[y == -1] = 0
eps = 1e-3 # the smaller it is the longer is the path
elif dataset_id == 3:
# download the file here
# http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html#news20.binary
X, y = load_svmlight_file("data/news20.binary")
X = X.astype(float)
y = y.astype(float)
y[y == -1] = 0
alpha_max = np.linalg.norm(np.dot(X.T, 0.5 - y), ord=np.inf)
# For the +-1 labels used by blitz (y_blitz = 2 * y - 1), the gradient at zero
# is X^T y_blitz / 2, so the same quantity is half the infinity norm below.
alpha_max_blitz = np.linalg.norm(np.dot(X.T, y_blitz), ord=np.inf) / 2.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
df = pd.read_csv('wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, stratify=y, random_state=1)
# Debugging algorithms with learning curves
# Diagnosing bias and variance problems with learning curves
pipe_lr = make_pipeline(StandardScaler(),
LogisticRegression(penalty='l2', random_state=1,
solver='lbfgs', max_iter=10000))
train_sizes, train_scores, test_scores = learning_curve(estimator=pipe_lr, X=X_train, y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10), cv=10, n_jobs=1)
train_mean = np.mean(train_scores, axis=1)
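# Illustrative continuation (a sketch, not from the original script): the
# usual next step is to aggregate the cross-validation scores and plot
# training vs. validation accuracy against the training-set size. The
# variable names below (train_std, test_mean, test_std) are assumptions.
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5,
         label='Training accuracy')
plt.fill_between(train_sizes, train_mean + train_std, train_mean - train_std,
                 alpha=0.15, color='blue')
plt.plot(train_sizes, test_mean, color='green', linestyle='--', marker='s',
         markersize=5, label='Validation accuracy')
plt.fill_between(train_sizes, test_mean + test_std, test_mean - test_std,
                 alpha=0.15, color='green')
plt.xlabel('Number of training examples')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()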
import numpy as np
from nuspacesim.config import NssConfig
from nuspacesim.simulation.geometry import region_geometry
from . import cpp_region_geometry
def test_rg():
cfg = NssConfig()
u = np.random.rand(int(1e6), 4)
T = region_geometry.RegionGeom(cfg)
T.throw(u.T)
R = cpp_region_geometry.RegionGeom(cfg)
R.run_geo_dmc_from_ran_array_nparray(u)
Rmsk = R.evMasknpArray
Tmsk = T.event_mask
assert all(Rmsk == Tmsk)
assert np.allclose(R.evArray["thetaS"][Rmsk], T.thetaS[Tmsk])
assert np.allclose(R.evArray["phiS"][Rmsk], T.phiS[Tmsk])
assert np.allclose(R.evArray["raS"][Rmsk], T.raS[Tmsk])
assert np.allclose(R.evArray["decS"][Rmsk], T.decS[Tmsk])
assert np.allclose(R.evArray["thetaTrSubV"][Rmsk], T.thetaTrSubV[Tmsk])
assert np.allclose(R.evArray["costhetaTrSubV"][Rmsk], T.costhetaTrSubV[Tmsk])
assert np.allclose(R.evArray["phiTrSubV"][Rmsk], T.phiTrSubV[Tmsk])
assert np.allclose(R.evArray["thetaTrSubN"][Rmsk], T.thetaTrSubN[Tmsk])
assert np.allclose(R.evArray["costhetaTrSubN"][Rmsk], T.costhetaTrSubN[Tmsk])
assert np.allclose(R.evArray["betaTrSubN"][Rmsk], T.betaTrSubN[Tmsk])
assert np.allclose(R.evArray["losPathLen"][Rmsk], T.losPathLen[Tmsk])
assert np.allclose(R.evArray["thetaTrSubV"][Rmsk], T.thetaTrSubV[Tmsk])
assert np.allclose(R.evArray["costhetaTrSubV"][Rmsk], T.costhetaTrSubV[Tmsk])
assert not np.allclose(R.evArray["thetaS"][Rmsk], 1 + T.thetaS[Tmsk])
assert np.allclose(R.betas(), T.betas())
assert np.allclose(R.beta_rad(), T.beta_rad())
assert np.allclose(R.thetas(), T.thetas())
assert np.allclose(R.pathLens(), T.pathLens())
triggers = np.random.uniform(size=int(1e6))[Tmsk]
costheta = np.random.normal(size=int(1e6))[Tmsk]
tauexitprob = np.random.uniform(size=int(1e6))[Tmsk]
threshold = 1 / 10
Rmci, Rmcig, Rpass = R.mcintegral(triggers, costheta, tauexitprob, threshold)
Tmci, Tmcig, Tpass = T.mcintegral(triggers, costheta, tauexitprob, threshold)
    assert np.allclose(Rmci, Tmci)
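    # Assumed follow-up checks, mirroring the mcintegral comparison above
    # (not part of the original snippet):
    assert np.allclose(Rmcig, Tmcig)
    assert np.allclose(Rpass, Tpass)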
# MIT License
#
# Copyright (c) 2018-2021 Tskit Developers
# Copyright (c) 2015-2018 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the low level C interface to tskit.
"""
import collections
import inspect
import itertools
import os
import random
import tempfile
import msprime
import numpy as np
import pytest
import _tskit
import tskit
def get_tracked_sample_counts(st, tracked_samples):
"""
Returns a list giving the number of samples in the specified list
that are in the subtree rooted at each node.
"""
nu = [0 for j in range(st.get_num_nodes())]
for j in tracked_samples:
# Duplicates not permitted.
assert nu[j] == 0
u = j
while u != _tskit.NULL:
nu[u] += 1
u = st.get_parent(u)
return nu
def get_sample_counts(tree_sequence, st):
"""
Returns a list of the sample node counts for the specified tree.
"""
nu = [0 for j in range(st.get_num_nodes())]
for j in range(tree_sequence.get_num_samples()):
u = j
while u != _tskit.NULL:
nu[u] += 1
u = st.get_parent(u)
return nu
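# Usage note: these helpers give an independent, pure-Python per-node count
# that tree-based tests can compare against the low-level tree's
# get_num_samples()/get_num_tracked_samples() results (how they are used
# further down is an assumption here).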
class LowLevelTestCase:
"""
Superclass of tests for the low-level interface.
"""
def verify_tree_dict(self, n, pi):
"""
Verifies that the specified tree in dict format is a
consistent coalescent history for a sample of size n.
"""
assert len(pi) <= 2 * n - 1
# _tskit.NULL should not be a node
assert _tskit.NULL not in pi
# verify the root is equal for all samples
root = 0
while pi[root] != _tskit.NULL:
root = pi[root]
for j in range(n):
k = j
while pi[k] != _tskit.NULL:
k = pi[k]
assert k == root
# 0 to n - 1 inclusive should always be nodes
for j in range(n):
assert j in pi
num_children = collections.defaultdict(int)
for j in pi.keys():
num_children[pi[j]] += 1
# nodes 0 to n are samples.
for j in range(n):
assert pi[j] != 0
assert num_children[j] == 0
# All non-sample nodes should be binary
for j in pi.keys():
if j > n:
assert num_children[j] >= 2
def get_example_tree_sequence(
self, sample_size=10, length=1, mutation_rate=1, random_seed=1
):
ts = msprime.simulate(
sample_size,
recombination_rate=0.1,
mutation_rate=mutation_rate,
random_seed=random_seed,
length=length,
)
return ts.ll_tree_sequence
def get_example_tree_sequences(self):
yield self.get_example_tree_sequence()
yield self.get_example_tree_sequence(2, 10)
yield self.get_example_tree_sequence(20, 10)
yield self.get_example_migration_tree_sequence()
def get_example_migration_tree_sequence(self):
pop_configs = [msprime.PopulationConfiguration(5) for _ in range(2)]
migration_matrix = [[0, 1], [1, 0]]
ts = msprime.simulate(
population_configurations=pop_configs,
migration_matrix=migration_matrix,
mutation_rate=1,
record_migrations=True,
random_seed=1,
)
return ts.ll_tree_sequence
def verify_iterator(self, iterator):
"""
Checks that the specified non-empty iterator implements the
iterator protocol correctly.
"""
list_ = list(iterator)
assert len(list_) > 0
for _ in range(10):
with pytest.raises(StopIteration):
next(iterator)
class MetadataTestMixin:
metadata_tables = [
"node",
"edge",
"site",
"mutation",
"migration",
"individual",
"population",
]
class TestTableCollection(LowLevelTestCase):
"""
Tests for the low-level TableCollection class
"""
def test_file_errors(self):
tc1 = _tskit.TableCollection(1)
self.get_example_tree_sequence().dump_tables(tc1)
def loader(*args):
tc = _tskit.TableCollection(1)
tc.load(*args)
for func in [tc1.dump, loader]:
with pytest.raises(TypeError):
func()
for bad_type in [None, [], {}]:
with pytest.raises(TypeError):
func(bad_type)
def test_dump_equality(self, tmp_path):
for ts in self.get_example_tree_sequences():
tc = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc)
with open(tmp_path / "tmp.trees", "wb") as f:
tc.dump(f)
with open(tmp_path / "tmp.trees", "rb") as f:
tc2 = _tskit.TableCollection()
tc2.load(f)
assert tc.equals(tc2)
def test_reference_deletion(self):
ts = msprime.simulate(10, mutation_rate=1, random_seed=1)
tc = ts.tables._ll_tables
# Get references to all the tables
tables = [
tc.individuals,
tc.nodes,
tc.edges,
tc.migrations,
tc.sites,
tc.mutations,
tc.populations,
tc.provenances,
]
del tc
for _ in range(10):
for table in tables:
assert len(str(table)) > 0
def test_set_sequence_length_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(TypeError):
del tables.sequence_length
for bad_value in ["sdf", None, []]:
with pytest.raises(TypeError):
tables.sequence_length = bad_value
def test_set_sequence_length(self):
tables = _tskit.TableCollection(1)
assert tables.sequence_length == 1
for value in [-1, 1e6, 1e-22, 1000, 2 ** 32, -10000]:
tables.sequence_length = value
assert tables.sequence_length == value
def test_set_metadata_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(AttributeError):
del tables.metadata
for bad_value in ["bytes only", 59, 43.4, None, []]:
with pytest.raises(TypeError):
tables.metadata = bad_value
def test_set_metadata(self):
tables = _tskit.TableCollection(1)
assert tables.metadata == b""
for value in [b"foo", b"", "💩".encode(), b"null char \0 in string"]:
tables.metadata = value
tables.metadata_schema = "Test we have two separate fields"
assert tables.metadata == value
def test_set_metadata_schema_errors(self):
tables = _tskit.TableCollection(1)
with pytest.raises(AttributeError):
del tables.metadata_schema
for bad_value in [59, 43.4, None, []]:
with pytest.raises(TypeError):
tables.metadata_schema = bad_value
def test_set_metadata_schema(self):
tables = _tskit.TableCollection(1)
assert tables.metadata_schema == ""
for value in ["foo", "", "💩", "null char \0 in string"]:
tables.metadata_schema = value
tables.metadata = b"Test we have two separate fields"
assert tables.metadata_schema == value
def test_simplify_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.simplify()
with pytest.raises(ValueError):
tc.simplify("asdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_unary="sdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_unary_in_individuals="abc")
with pytest.raises(TypeError):
tc.simplify([0, 1], keep_input_roots="sdf")
with pytest.raises(TypeError):
tc.simplify([0, 1], filter_populations="x")
with pytest.raises(_tskit.LibraryError):
tc.simplify([0, -1])
def test_link_ancestors_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.link_ancestors()
with pytest.raises(TypeError):
tc.link_ancestors([0, 1])
with pytest.raises(ValueError):
tc.link_ancestors(samples=[0, 1], ancestors="sdf")
with pytest.raises(ValueError):
tc.link_ancestors(samples="sdf", ancestors=[0, 1])
with pytest.raises(_tskit.LibraryError):
tc.link_ancestors(samples=[0, 1], ancestors=[11, -1])
with pytest.raises(_tskit.LibraryError):
tc.link_ancestors(samples=[0, -1], ancestors=[11])
def test_link_ancestors(self):
ts = msprime.simulate(2, random_seed=1)
tc = ts.tables._ll_tables
edges = tc.link_ancestors([0, 1], [3])
assert isinstance(edges, _tskit.EdgeTable)
del edges
assert tc.edges.num_rows == 2
def test_subset_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.subset(np.array(["a"]))
with pytest.raises(ValueError):
tc.subset(np.array([[1], [2]], dtype="int32"))
with pytest.raises(TypeError):
tc.subset()
with pytest.raises(_tskit.LibraryError):
tc.subset(np.array([100, 200], dtype="int32"))
def test_union_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
tc2 = tc
with pytest.raises(TypeError):
tc.union(tc2, np.array(["a"]))
with pytest.raises(ValueError):
tc.union(tc2, np.array([0], dtype="int32"))
with pytest.raises(TypeError):
tc.union(tc2)
with pytest.raises(TypeError):
tc.union()
node_mapping = np.arange(ts.num_nodes, dtype="int32")
node_mapping[0] = 1200
with pytest.raises(_tskit.LibraryError):
tc.union(tc2, node_mapping)
node_mapping = np.array(
[node_mapping.tolist(), node_mapping.tolist()], dtype="int32"
)
with pytest.raises(ValueError):
tc.union(tc2, node_mapping)
with pytest.raises(ValueError):
tc.union(tc2, np.array([[1], [2]], dtype="int32"))
def test_equals_bad_args(self):
ts = msprime.simulate(10, random_seed=1242)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.equals()
with pytest.raises(TypeError):
tc.equals(None)
assert tc.equals(tc)
with pytest.raises(TypeError):
tc.equals(tc, no_such_arg=1)
bad_bool = "x"
with pytest.raises(TypeError):
tc.equals(tc, ignore_metadata=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_ts_metadata=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_provenance=bad_bool)
with pytest.raises(TypeError):
tc.equals(tc, ignore_timestamps=bad_bool)
def test_asdict(self):
for ts in self.get_example_tree_sequences():
tc = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc)
d = tc.asdict()
# Method is tested extensively elsewhere, just basic sanity check here
assert isinstance(d, dict)
assert len(d) > 0
def test_fromdict(self):
for ts in self.get_example_tree_sequences():
tc1 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tc1)
d = tc1.asdict()
tc2 = _tskit.TableCollection(sequence_length=0)
tc2.fromdict(d)
assert tc1.equals(tc2)
def test_asdict_bad_args(self):
ts = msprime.simulate(10, random_seed=1242)
tc = ts.tables._ll_tables
for bad_type in [None, 0.1, "str"]:
with pytest.raises(TypeError):
tc.asdict(force_offset_64=bad_type)
def test_fromdict_bad_args(self):
tc = _tskit.TableCollection(0)
for bad_type in [None, 0.1, "str"]:
with pytest.raises(TypeError):
tc.fromdict(bad_type)
class TestIbd:
def test_uninitialised(self):
result = _tskit.IbdResult.__new__(_tskit.IbdResult)
with pytest.raises(SystemError):
result.get(0, 1)
with pytest.raises(SystemError):
result.print_state()
with pytest.raises(SystemError):
result.total_segments
def test_find_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
with pytest.raises(TypeError):
tc.find_ibd()
for bad_samples in ["sdf", None, {}]:
with pytest.raises(ValueError):
tc.find_ibd(bad_samples)
for not_enough_samples in [[], [0]]:
with pytest.raises(ValueError):
tc.find_ibd(not_enough_samples)
# input array must be 2D
with pytest.raises(ValueError):
tc.find_ibd([[[1], [1]]])
# Input array must be (n, 2)
with pytest.raises(ValueError):
tc.find_ibd([[1, 1, 1]])
for bad_float in ["sdf", None, {}]:
with pytest.raises(TypeError):
tc.find_ibd([(0, 1)], min_length=bad_float)
with pytest.raises(TypeError):
tc.find_ibd([(0, 1)], max_time=bad_float)
with pytest.raises(_tskit.LibraryError):
tc.find_ibd([(0, 1)], max_time=-1)
with pytest.raises(_tskit.LibraryError):
tc.find_ibd([(0, 1)], min_length=-1)
def test_get_output(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
assert isinstance(result, _tskit.IbdResult)
for pair in pairs:
value = result.get(*pair)
assert isinstance(value, dict)
assert len(value) == 3
assert list(value["left"]) == [0]
assert list(value["right"]) == [1]
assert len(value["node"]) == 1
def test_get_bad_args(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
with pytest.raises(TypeError):
result.get()
with pytest.raises(TypeError):
result.get("0", 1)
with pytest.raises(_tskit.LibraryError):
result.get(0, 0)
# TODO this should probably be a KeyError, but let's not
# worry about it for now.
with pytest.raises(_tskit.LibraryError):
result.get(0, 2)
def test_print_state(self):
ts = msprime.simulate(10, random_seed=1)
tc = ts.tables._ll_tables
pairs = [(0, 1), (2, 3)]
result = tc.find_ibd(pairs)
with pytest.raises(TypeError):
result.print_state()
with tempfile.TemporaryFile("w+") as f:
result.print_state(f)
f.seek(0)
output = f.read()
assert len(output) > 0
assert "IBD" in output
def test_direct_instantiation(self):
# Nobody should do this, but just in case
result = _tskit.IbdResult()
assert result.total_segments == 0
with tempfile.TemporaryFile("w+") as f:
result.print_state(f)
f.seek(0)
output = f.read()
assert len(output) > 0
assert "IBD" in output
class TestTableMethods:
"""
Tests for the low-level table methods.
"""
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
def test_table_extend(self, table_name, ts_fixture):
table = getattr(ts_fixture.tables, table_name)
assert len(table) >= 5
ll_table = table.ll_table
table_copy = table.copy()
ll_table.extend(table_copy.ll_table, row_indexes=[])
assert table == table_copy
ll_table.clear()
ll_table.extend(table_copy.ll_table, row_indexes=range(len(table_copy)))
assert table == table_copy
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
@pytest.mark.parametrize(
["row_indexes", "expected_rows"],
[
([0], [0]),
([4] * 1000, [4] * 1000),
([4, 1, 3, 0, 0], [4, 1, 3, 0, 0]),
(np.array([0, 1, 4], dtype=np.uint8), [0, 1, 4]),
(np.array([3, 3, 3], dtype=np.uint16), [3, 3, 3]),
(np.array([4, 2, 1], dtype=np.int8), [4, 2, 1]),
(np.array([4, 2], dtype=np.int16), [4, 2]),
(np.array([0, 1], dtype=np.int32), [0, 1]),
(range(2, -1, -1), [2, 1, 0]),
],
)
def test_table_extend_types(
self, ts_fixture, table_name, row_indexes, expected_rows
):
table = getattr(ts_fixture.tables, table_name)
assert len(table) >= 5
ll_table = table.ll_table
table_copy = table.copy()
ll_table.extend(table_copy.ll_table, row_indexes=row_indexes)
assert len(table) == len(table_copy) + len(expected_rows)
for i, expected_row in enumerate(expected_rows):
assert table[len(table_copy) + i] == table_copy[expected_row]
@pytest.mark.parametrize(
["table_name", "column_name"],
[
(t, c)
for t in tskit.TABLE_NAMES
for c in getattr(tskit, f"{t[:-1].capitalize()}Table").column_names
if c[-7:] != "_offset"
],
)
def test_table_update(self, ts_fixture, table_name, column_name):
table = getattr(ts_fixture.tables, table_name)
copy = table.copy()
ll_table = table.ll_table
# Find the first row where this column differs to get a value to swap in
other_row_index = -1
for i, row in enumerate(table):
if not np.array_equal(
getattr(table[0], column_name), getattr(row, column_name)
):
other_row_index = i
assert other_row_index != -1
# No-op update should not create a change
args = ll_table.get_row(0)
ll_table.update_row(0, *args)
table.assert_equals(copy)
# Modify the column under test in the first row
new_args = list(ll_table.get_row(0))
arg_index = list(inspect.signature(table.add_row).parameters.keys()).index(
column_name
)
new_args[arg_index] = ll_table.get_row(other_row_index)[arg_index]
ll_table.update_row(0, *new_args)
for a, b in zip(ll_table.get_row(0), new_args):
np.array_equal(a, b)
def test_update_defaults(self):
t = tskit.IndividualTable()
assert t.add_row(flags=1, location=[1, 2], parents=[3, 4], metadata=b"FOO") == 0
t.ll_table.update_row(0)
assert t.flags[0] == 0
assert len(t.location) == 0
assert t.location_offset[0] == 0
assert len(t.parents) == 0
assert t.parents_offset[0] == 0
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.NodeTable()
assert (
t.add_row(flags=1, time=2, population=3, individual=4, metadata=b"FOO") == 0
)
t.ll_table.update_row(0)
assert t.time[0] == 0
assert t.flags[0] == 0
assert t.population[0] == tskit.NULL
assert t.individual[0] == tskit.NULL
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.EdgeTable()
assert t.add_row(1, 2, 3, 4, metadata=b"FOO") == 0
t.ll_table.update_row(0, 1, 2, 3, 4)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.MigrationTable()
assert t.add_row(1, 2, 3, 4, 5, 6, b"FOO") == 0
t.ll_table.update_row(0, 1, 2, 3, 4, 5, 6)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
t = tskit.MutationTable()
assert t.add_row(1, 2, "A", 3, b"FOO", 4) == 0
t.ll_table.update_row(0, 1, 2, "A", 3)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
assert tskit.is_unknown_time(t.time[0])
t = tskit.PopulationTable()
assert t.add_row(b"FOO") == 0
t.ll_table.update_row(0)
assert len(t.metadata) == 0
assert t.metadata_offset[0] == 0
def test_update_bad_data(self):
t = tskit.IndividualTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, flags="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=123)
with pytest.raises(ValueError):
t.ll_table.update_row(0, location="1234")
with pytest.raises(ValueError):
t.ll_table.update_row(0, parents="forty-two")
t = tskit.NodeTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, flags="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, time="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, individual="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, population="x")
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=123)
t = tskit.EdgeTable()
t.add_row(1, 2, 3, 4)
with pytest.raises(TypeError):
t.ll_table.update_row(0, left="x", right=0, parent=0, child=0)
with pytest.raises(TypeError):
t.ll_table.update_row(
0,
)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, 0, 0, metadata=123)
t = tskit.SiteTable()
t.add_row(0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "x", "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "A", metadata=[0, 1, 2])
t = tskit.MutationTable()
t.add_row(0, 0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "0", 0, "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "0", "A")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", parent=None)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", metadata=[0])
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, "A", time="A")
t = tskit.MigrationTable()
with pytest.raises(TypeError):
t.add_row(left="x", right=0, node=0, source=0, dest=0, time=0)
with pytest.raises(TypeError):
t.ll_table.update_row(
0,
)
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, 0, 0, 0, 0, 0, metadata=123)
t = tskit.ProvenanceTable()
t.add_row("a", "b")
with pytest.raises(TypeError):
t.ll_table.update_row(0, 0, "b")
with pytest.raises(TypeError):
t.ll_table.update_row(0, "a", 0)
t = tskit.PopulationTable()
t.add_row()
with pytest.raises(TypeError):
t.ll_table.update_row(0, metadata=[0])
class TestTableMethodsErrors:
"""
    Tests for error handling in the low-level tables.
"""
def yield_tables(self, ts):
for table in ts.tables.name_map.values():
yield table.ll_table
@pytest.mark.parametrize(
"table_name",
tskit.TABLE_NAMES,
)
def test_table_extend_bad_args(self, ts_fixture, table_name):
table = getattr(ts_fixture.tables, table_name)
ll_table = table.ll_table
ll_table_copy = table.copy().ll_table
with pytest.raises(
_tskit.LibraryError,
match="Tables can only be extended using rows from a different table",
):
ll_table.extend(ll_table, row_indexes=[])
with pytest.raises(TypeError):
ll_table.extend(None, row_indexes=[])
with pytest.raises(ValueError):
ll_table.extend(ll_table_copy, row_indexes=5)
with pytest.raises(TypeError):
ll_table.extend(ll_table_copy, row_indexes=[None])
with pytest.raises(ValueError, match="object too deep"):
ll_table.extend(ll_table_copy, row_indexes=[[0, 1], [2, 3]])
with pytest.raises(ValueError, match="object too deep"):
ll_table.extend(ll_table_copy, row_indexes=[[0, 1]])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=[-1])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=[1000])
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.extend(ll_table_copy, row_indexes=range(10000000, 10000001))
# Uncastable types
for dtype in [np.uint32, np.int64, np.uint64, np.float32, np.float64]:
with pytest.raises(TypeError, match="Cannot cast"):
ll_table.extend(ll_table_copy, row_indexes=np.array([0], dtype=dtype))
@pytest.mark.parametrize("table_name", tskit.TABLE_NAMES)
def test_update_bad_row_index(self, ts_fixture, table_name):
table = getattr(ts_fixture.tables, table_name)
ll_table = table.ll_table
row_data = ll_table.get_row(0)
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.update_row(-1, *row_data)
with pytest.raises(ValueError, match="tskit ids must be"):
ll_table.update_row(-42, *row_data)
with pytest.raises(TypeError):
ll_table.update_row([], *row_data)
with pytest.raises(TypeError):
ll_table.update_row("abc", *row_data)
with pytest.raises(_tskit.LibraryError, match="out of bounds"):
ll_table.update_row(10000, *row_data)
with pytest.raises(OverflowError, match="Value too large for tskit id type"):
ll_table.update_row(2 ** 62, *row_data)
def test_equals_bad_args(self, ts_fixture):
for ll_table in self.yield_tables(ts_fixture):
assert ll_table.equals(ll_table)
with pytest.raises(TypeError):
ll_table.equals(None)
with pytest.raises(TypeError):
ll_table.equals(ll_table, no_such_arg="")
uninit_other = type(ll_table).__new__(type(ll_table))
with pytest.raises(SystemError):
ll_table.equals(uninit_other)
def test_get_row_bad_args(self, ts_fixture):
for ll_table in self.yield_tables(ts_fixture):
assert ll_table.get_row(0) is not None
with pytest.raises(TypeError):
ll_table.get_row(no_such_arg="")
@pytest.mark.parametrize("table", ["nodes", "individuals"])
def test_flag_underflow_overflow(self, table):
tables = _tskit.TableCollection(1)
table = getattr(tables, table)
table.add_row(flags=0)
table.add_row(flags=(1 << 32) - 1)
with pytest.raises(OverflowError, match="unsigned int32 >= than 2\\^32"):
table.add_row(flags=1 << 32)
with pytest.raises(OverflowError, match="int too big to convert"):
table.add_row(flags=1 << 64)
with pytest.raises(OverflowError, match="int too big to convert"):
table.add_row(flags=1 << 256)
with pytest.raises(
ValueError, match="Can't convert negative value to unsigned int"
):
table.add_row(flags=-1)
def test_index(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
assert tc.indexes["edge_insertion_order"].dtype == np.int32
assert tc.indexes["edge_removal_order"].dtype == np.int32
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(18, dtype=np.int32)[::-1]
)
tc.drop_index()
assert tc.indexes == {}
tc.build_index()
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(18, dtype=np.int32)[::-1]
)
modify_indexes = tc.indexes
modify_indexes["edge_insertion_order"] = np.arange(42, 42 + 18, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.arange(
4242, 4242 + 18, dtype=np.int32
)
tc.indexes = modify_indexes
assert np.array_equal(
tc.indexes["edge_insertion_order"], np.arange(42, 42 + 18, dtype=np.int32)
)
assert np.array_equal(
tc.indexes["edge_removal_order"], np.arange(4242, 4242 + 18, dtype=np.int32)
)
def test_no_indexes(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
tc.drop_index()
assert tc.indexes == {}
def test_bad_indexes(self):
tc = msprime.simulate(10, random_seed=42).tables._ll_tables
for col in ("insertion", "removal"):
d = tc.indexes
d[f"edge_{col}_order"] = d[f"edge_{col}_order"][:-1]
with pytest.raises(
ValueError,
match="^edge_insertion_order and"
" edge_removal_order must be the same"
" length$",
):
tc.indexes = d
d = tc.indexes
for col in ("insertion", "removal"):
d[f"edge_{col}_order"] = d[f"edge_{col}_order"][:-1]
with pytest.raises(
ValueError,
match="^edge_insertion_order and edge_removal_order must be"
" the same length as the number of edges$",
):
tc.indexes = d
# Both columns must be provided, if one is
for col in ("insertion", "removal"):
d = tc.indexes
del d[f"edge_{col}_order"]
with pytest.raises(
TypeError,
match="^edge_insertion_order and "
"edge_removal_order must be specified "
"together$",
):
tc.indexes = d
tc = msprime.simulate(
10, recombination_rate=10, random_seed=42
).tables._ll_tables
modify_indexes = tc.indexes
shape = modify_indexes["edge_insertion_order"].shape
modify_indexes["edge_insertion_order"] = np.zeros(shape, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.zeros(shape, dtype=np.int32)
tc.indexes = modify_indexes
ts = _tskit.TreeSequence()
with pytest.raises(
_tskit.LibraryError,
match="^Bad edges: contradictory children for a given"
" parent over an interval$",
):
ts.load_tables(tc, build_indexes=False)
modify_indexes["edge_insertion_order"] = np.full(shape, 2 ** 30, dtype=np.int32)
modify_indexes["edge_removal_order"] = np.full(shape, 2 ** 30, dtype=np.int32)
tc.indexes = modify_indexes
ts = _tskit.TreeSequence()
with pytest.raises(_tskit.LibraryError, match="^Edge out of bounds$"):
ts.load_tables(tc, build_indexes=False)
class TestTreeSequence(LowLevelTestCase, MetadataTestMixin):
"""
Tests for the low-level interface for the TreeSequence.
"""
def setUp(self):
fd, self.temp_file = tempfile.mkstemp(prefix="msp_ll_ts_")
os.close(fd)
def tearDown(self):
os.unlink(self.temp_file)
def test_file_errors(self):
ts1 = self.get_example_tree_sequence()
def loader(*args):
ts2 = _tskit.TreeSequence()
ts2.load(*args)
for func in [ts1.dump, loader]:
with pytest.raises(TypeError):
func()
for bad_type in [None, [], {}]:
with pytest.raises(TypeError):
func(bad_type)
def test_initial_state(self):
# Check the initial state to make sure that it is empty.
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
ts.get_num_samples()
with pytest.raises(ValueError):
ts.get_sequence_length()
with pytest.raises(ValueError):
ts.get_num_trees()
with pytest.raises(ValueError):
ts.get_num_edges()
with pytest.raises(ValueError):
ts.get_num_mutations()
with pytest.raises(ValueError):
ts.get_num_migrations()
with pytest.raises(ValueError):
ts.get_num_migrations()
with pytest.raises(ValueError):
ts.get_genotype_matrix()
with pytest.raises(ValueError):
ts.dump()
def test_num_nodes(self):
for ts in self.get_example_tree_sequences():
max_node = 0
for j in range(ts.get_num_edges()):
_, _, parent, child, _ = ts.get_edge(j)
for node in [parent, child]:
if node > max_node:
max_node = node
assert max_node + 1 == ts.get_num_nodes()
def test_dump_equality(self, tmp_path):
for ts in self.get_example_tree_sequences():
tables = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tables)
tables.compute_mutation_times()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
with open(tmp_path / "temp.trees", "wb") as f:
ts.dump(f)
with open(tmp_path / "temp.trees", "rb") as f:
ts2 = _tskit.TreeSequence()
ts2.load(f)
tc = _tskit.TableCollection(ts.get_sequence_length())
ts.dump_tables(tc)
tc2 = _tskit.TableCollection(ts2.get_sequence_length())
ts2.dump_tables(tc2)
assert tc.equals(tc2)
def verify_mutations(self, ts):
mutations = [ts.get_mutation(j) for j in range(ts.get_num_mutations())]
assert ts.get_num_mutations() > 0
assert len(mutations) == ts.get_num_mutations()
# Check the form of the mutations
for j, (position, nodes, index) in enumerate(mutations):
assert j == index
for node in nodes:
assert isinstance(node, int)
assert node >= 0
assert node <= ts.get_num_nodes()
assert isinstance(position, float)
assert position > 0
assert position < ts.get_sequence_length()
# mutations must be sorted by position order.
assert mutations == sorted(mutations)
def test_get_edge_interface(self):
for ts in self.get_example_tree_sequences():
num_edges = ts.get_num_edges()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_edge(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_edge(num_edges + j)
for x in [None, "", {}, []]:
with pytest.raises(TypeError):
ts.get_edge(x)
def test_get_node_interface(self):
for ts in self.get_example_tree_sequences():
num_nodes = ts.get_num_nodes()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_node(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_node(num_nodes + j)
for x in [None, "", {}, []]:
with pytest.raises(TypeError):
ts.get_node(x)
def test_get_genotype_matrix_interface(self):
for ts in self.get_example_tree_sequences():
num_samples = ts.get_num_samples()
num_sites = ts.get_num_sites()
G = ts.get_genotype_matrix()
assert G.shape == (num_sites, num_samples)
with pytest.raises(TypeError):
ts.get_genotype_matrix(isolated_as_missing=None)
with pytest.raises(TypeError):
ts.get_genotype_matrix(alleles="XYZ")
with pytest.raises(ValueError):
ts.get_genotype_matrix(alleles=tuple())
G = ts.get_genotype_matrix(isolated_as_missing=False)
assert G.shape == (num_sites, num_samples)
def test_get_genotype_matrix_missing_data(self):
tables = _tskit.TableCollection(1)
tables.nodes.add_row(flags=1, time=0)
tables.nodes.add_row(flags=1, time=0)
tables.sites.add_row(0.1, "A")
tables.build_index()
ts = _tskit.TreeSequence(0)
ts.load_tables(tables)
G = ts.get_genotype_matrix(isolated_as_missing=False)
assert np.all(G == 0)
G = ts.get_genotype_matrix(isolated_as_missing=True)
assert np.all(G == -1)
G = ts.get_genotype_matrix()
assert np.all(G == -1)
def test_get_migration_interface(self):
ts = self.get_example_migration_tree_sequence()
for bad_type in ["", None, {}]:
with pytest.raises(TypeError):
ts.get_migration(bad_type)
num_records = ts.get_num_migrations()
# We don't accept Python negative indexes here.
with pytest.raises(IndexError):
ts.get_migration(-1)
for j in [0, 10, 10 ** 6]:
with pytest.raises(IndexError):
ts.get_migration(num_records + j)
def test_get_samples(self):
for ts in self.get_example_tree_sequences():
# get_samples takes no arguments.
with pytest.raises(TypeError):
ts.get_samples(0)
assert np.array_equal(
np.arange(ts.get_num_samples(), dtype=np.int32), ts.get_samples()
)
def test_genealogical_nearest_neighbours(self):
for ts in self.get_example_tree_sequences():
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours()
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours(focal=None)
with pytest.raises(TypeError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets={},
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[],
)
bad_array_values = ["", {}, "x", [[[0], [1, 2]]]]
for bad_array_value in bad_array_values:
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=bad_array_value,
reference_sets=[[0], [1]],
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[[0], bad_array_value],
)
with pytest.raises(ValueError):
ts.genealogical_nearest_neighbours(
focal=ts.get_samples(),
reference_sets=[bad_array_value],
)
focal = ts.get_samples()
A = ts.genealogical_nearest_neighbours(focal, [focal[2:], focal[:2]])
assert A.shape == (len(focal), 2)
def test_mean_descendants(self):
for ts in self.get_example_tree_sequences():
with pytest.raises(TypeError):
ts.mean_descendants()
with pytest.raises(TypeError):
ts.mean_descendants(reference_sets={})
with pytest.raises(ValueError):
ts.mean_descendants(reference_sets=[])
bad_array_values = ["", {}, "x", [[[0], [1, 2]]]]
for bad_array_value in bad_array_values:
with pytest.raises(ValueError):
ts.mean_descendants(
reference_sets=[[0], bad_array_value],
)
with pytest.raises(ValueError):
ts.mean_descendants(reference_sets=[bad_array_value])
focal = ts.get_samples()
A = ts.mean_descendants([focal[2:], focal[:2]])
assert A.shape == (ts.get_num_nodes(), 2)
def test_metadata_schemas(self):
tables = _tskit.TableCollection(1.0)
# Set the schema
for table_name in self.metadata_tables:
table = getattr(tables, f"{table_name}s")
table.metadata_schema = f"{table_name} test metadata schema"
# Read back via ll tree sequence
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
schemas = ts.get_table_metadata_schemas()
for table_name in self.metadata_tables:
assert getattr(schemas, table_name) == f"{table_name} test metadata schema"
# Clear and read back again
for table_name in self.metadata_tables:
getattr(tables, f"{table_name}s").metadata_schema = ""
ts = _tskit.TreeSequence()
ts.load_tables(tables)
schemas = ts.get_table_metadata_schemas()
for table_name in self.metadata_tables:
assert getattr(schemas, table_name) == ""
def test_metadata(self):
tables = _tskit.TableCollection(1)
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata() == b""
for value in [b"foo", b"", "💩".encode(), b"null char \0 in string"]:
tables.metadata = value
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata() == value
def test_metadata_schema(self):
tables = _tskit.TableCollection(1)
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata_schema() == ""
for value in ["foo", "", "💩", "null char \0 in string"]:
tables.metadata_schema = value
ts = _tskit.TreeSequence()
ts.load_tables(tables)
assert ts.get_metadata_schema() == value
def test_kc_distance_errors(self):
ts1 = self.get_example_tree_sequence(10)
with pytest.raises(TypeError):
ts1.get_kc_distance()
with pytest.raises(TypeError):
ts1.get_kc_distance(ts1)
for bad_tree in [None, "tree", 0]:
with pytest.raises(TypeError):
ts1.get_kc_distance(bad_tree, lambda_=0)
for bad_value in ["tree", [], None]:
with pytest.raises(TypeError):
ts1.get_kc_distance(ts1, lambda_=bad_value)
# Different numbers of samples fail.
ts2 = self.get_example_tree_sequence(11)
self.verify_kc_library_error(ts1, ts2)
# Different sequence lengths fail.
ts2 = self.get_example_tree_sequence(10, length=11)
self.verify_kc_library_error(ts1, ts2)
def verify_kc_library_error(self, ts1, ts2):
with pytest.raises(_tskit.LibraryError):
ts1.get_kc_distance(ts2, 0)
def test_kc_distance(self):
ts1 = self.get_example_tree_sequence(10, random_seed=123456)
ts2 = self.get_example_tree_sequence(10, random_seed=1234)
for lambda_ in [-1, 0, 1, 1000, -1e300]:
x1 = ts1.get_kc_distance(ts2, lambda_)
x2 = ts2.get_kc_distance(ts1, lambda_)
assert x1 == x2
def test_load_tables_build_indexes(self):
for ts in self.get_example_tree_sequences():
tables = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts.dump_tables(tables)
tables.drop_index()
# Tables not in tc but rebuilt
ts2 = _tskit.TreeSequence()
ts2.load_tables(tables, build_indexes=True)
tables2 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts2.dump_tables(tables2)
assert tables2.has_index()
# Tables not in tc, not rebuilt so error
ts3 = _tskit.TreeSequence()
with pytest.raises(
_tskit.LibraryError, match="Table collection must be indexed"
):
ts3.load_tables(tables)
# Tables in tc, not rebuilt
tables.build_index()
ts4 = _tskit.TreeSequence()
ts4.load_tables(tables, build_indexes=False)
tables4 = _tskit.TableCollection(sequence_length=ts.get_sequence_length())
ts4.dump_tables(tables4)
assert tables4.has_index()
def test_clear_table(self, ts_fixture):
tables = _tskit.TableCollection(
sequence_length=ts_fixture.get_sequence_length()
)
ts_fixture.ll_tree_sequence.dump_tables(tables)
tables.clear()
data_tables = [t for t in tskit.TABLE_NAMES if t != "provenances"]
for table in data_tables:
assert getattr(tables, f"{table}").num_rows == 0
assert len(getattr(tables, f"{table}").metadata_schema) != 0
assert tables.provenances.num_rows > 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_provenance=True)
assert tables.provenances.num_rows == 0
for table in data_tables:
assert len(getattr(tables, f"{table}").metadata_schema) != 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_metadata_schemas=True)
for table in data_tables:
assert len(getattr(tables, f"{table}").metadata_schema) == 0
assert len(tables.metadata) > 0
assert len(tables.metadata_schema) > 0
tables.clear(clear_ts_metadata_and_schema=True)
assert len(tables.metadata) == 0
assert len(tables.metadata_schema) == 0
class StatsInterfaceMixin:
"""
Tests for the interface on specific stats.
"""
def test_mode_errors(self):
_, f, params = self.get_example()
for bad_mode in ["", "not a mode", "SITE", "x" * 8192]:
with pytest.raises(ValueError):
f(mode=bad_mode, **params)
for bad_type in [123, {}, None, [[]]]:
with pytest.raises(TypeError):
f(mode=bad_type, **params)
def test_window_errors(self):
ts, f, params = self.get_example()
del params["windows"]
for bad_array in ["asdf", None, [[[[]], [[]]]], np.zeros((10, 3, 4))]:
with pytest.raises(ValueError):
f(windows=bad_array, **params)
for bad_windows in [[], [0]]:
with pytest.raises(ValueError):
f(windows=bad_windows, **params)
L = ts.get_sequence_length()
bad_windows = [
[L, 0],
[0.1, L],
[-1, L],
[0, L + 0.1],
[0, 0.1, 0.1, L],
[0, -1, L],
[0, 0.1, 0.05, 0.2, L],
]
for bad_window in bad_windows:
with pytest.raises(_tskit.LibraryError):
f(windows=bad_window, **params)
def test_windows_output(self):
ts, f, params = self.get_example()
del params["windows"]
for num_windows in range(1, 10):
windows = np.linspace(0, ts.get_sequence_length(), num=num_windows + 1)
assert windows.shape[0] == num_windows + 1
sigma = f(windows=windows, **params)
assert sigma.shape[0] == num_windows
class WeightMixin(StatsInterfaceMixin):
def get_example(self):
ts, method = self.get_method()
params = {
"weights": np.ones((ts.get_num_samples(), 2)),
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_bad_weights(self):
ts, f, params = self.get_example()
del params["weights"]
n = ts.get_num_samples()
with pytest.raises(_tskit.LibraryError):
f(weights=np.ones((n, 0)), **params)
for bad_weight_shape in [(n - 1, 1), (n + 1, 1), (0, 3)]:
with pytest.raises(ValueError):
f(weights=np.ones(bad_weight_shape), **params)
def test_output_dims(self):
ts, method, params = self.get_example()
weights = params["weights"]
nw = weights.shape[1]
windows = [0, ts.get_sequence_length()]
for mode in ["site", "branch"]:
out = method(weights[:, [0]], windows, mode=mode)
assert out.shape == (1, 1)
out = method(weights, windows, mode=mode)
assert out.shape == (1, nw)
out = method(weights[:, [0, 0, 0]], windows, mode=mode)
assert out.shape == (1, 3)
mode = "node"
N = ts.get_num_nodes()
out = method(weights[:, [0]], windows, mode=mode)
assert out.shape == (1, N, 1)
out = method(weights, windows, mode=mode)
assert out.shape == (1, N, nw)
out = method(weights[:, [0, 0, 0]], windows, mode=mode)
assert out.shape == (1, N, 3)
class WeightCovariateMixin(StatsInterfaceMixin):
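    # Stats that take per-sample weights plus a (num_samples x k) covariate
    # matrix, e.g. trait_linear_model.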
def get_example(self):
ts, method = self.get_method()
params = {
"weights": np.ones((ts.get_num_samples(), 2)),
"covariates": np.array(
[np.arange(ts.get_num_samples()), np.arange(ts.get_num_samples()) ** 2]
).T,
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_output_dims(self):
ts, method, params = self.get_example()
weights = params["weights"]
nw = weights.shape[1]
windows = [0, ts.get_sequence_length()]
for covariates in (params["covariates"], params["covariates"][:, :0]):
for mode in ["site", "branch"]:
out = method(weights[:, [0]], covariates, windows, mode=mode)
assert out.shape == (1, 1)
out = method(weights, covariates, windows, mode=mode)
assert out.shape == (1, nw)
out = method(weights[:, [0, 0, 0]], covariates, windows, mode=mode)
assert out.shape == (1, 3)
mode = "node"
N = ts.get_num_nodes()
out = method(weights[:, [0]], covariates, windows, mode=mode)
assert out.shape == (1, N, 1)
out = method(weights, covariates, windows, mode=mode)
assert out.shape == (1, N, nw)
out = method(weights[:, [0, 0, 0]], covariates, windows, mode=mode)
assert out.shape == (1, N, 3)
class SampleSetMixin(StatsInterfaceMixin):
def test_bad_sample_sets(self):
ts, f, params = self.get_example()
del params["sample_set_sizes"]
del params["sample_sets"]
with pytest.raises(_tskit.LibraryError):
f(sample_sets=[], sample_set_sizes=[], **params)
n = ts.get_num_samples()
samples = ts.get_samples()
for bad_set_sizes in [[], [1], [n - 1], [n + 1], [n - 3, 1, 1], [1, n - 2]]:
with pytest.raises(ValueError):
f(sample_set_sizes=bad_set_sizes, sample_sets=samples, **params)
N = ts.get_num_nodes()
for bad_node in [-1, N, N + 1, -N]:
with pytest.raises(_tskit.LibraryError):
f(sample_set_sizes=[2], sample_sets=[0, bad_node], **params)
for bad_sample in [n, n + 1, N - 1]:
with pytest.raises(_tskit.LibraryError):
f(sample_set_sizes=[2], sample_sets=[0, bad_sample], **params)
class OneWaySampleStatsMixin(SampleSetMixin):
"""
Tests for one-way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [ts.get_num_samples()],
"sample_sets": ts.get_samples(),
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
result = method(
[ts.get_num_samples()], ts.get_samples(), [0, ts.get_sequence_length()]
)
assert result.shape == (1, 1)
result = method(
[ts.get_num_samples()],
ts.get_samples(),
[0, ts.get_sequence_length()],
mode="node",
)
assert result.shape == (1, ts.get_num_nodes(), 1)
result = method(
[ts.get_num_samples()], ts.get_samples(), ts.get_breakpoints(), mode="node"
)
assert result.shape == (ts.get_num_trees(), ts.get_num_nodes(), 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
pi = method([n], samples, windows, mode=mode)
assert pi.shape == (1, 1)
pi = method([2, n - 2], samples, windows, mode=mode)
assert pi.shape == (1, 2)
pi = method([2, 2, n - 4], samples, windows, mode=mode)
assert pi.shape == (1, 3)
pi = method(np.ones(n).astype(np.uint32), samples, windows, mode=mode)
assert pi.shape == (1, n)
mode = "node"
N = ts.get_num_nodes()
pi = method([n], samples, windows, mode=mode)
assert pi.shape == (1, N, 1)
pi = method([2, n - 2], samples, windows, mode=mode)
assert pi.shape == (1, N, 2)
pi = method([2, 2, n - 4], samples, windows, mode=mode)
assert pi.shape == (1, N, 3)
pi = method(np.ones(n).astype(np.uint32), samples, windows, mode=mode)
assert pi.shape == (1, N, n)
def test_polarised(self):
# TODO move this to the top level.
ts, method = self.get_method()
samples = ts.get_samples()
n = len(samples)
windows = [0, ts.get_sequence_length()]
method([n], samples, windows, polarised=True)
method([n], samples, windows, polarised=False)
class TestDiversity(LowLevelTestCase, OneWaySampleStatsMixin):
"""
Tests for the diversity method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.diversity
class TestTraitCovariance(LowLevelTestCase, WeightMixin):
"""
Tests for trait covariance.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_covariance
class TestTraitCorrelation(LowLevelTestCase, WeightMixin):
"""
Tests for trait correlation.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_correlation
class TestTraitLinearModel(LowLevelTestCase, WeightCovariateMixin):
"""
    Tests for the trait_linear_model method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.trait_linear_model
class TestSegregatingSites(LowLevelTestCase, OneWaySampleStatsMixin):
"""
    Tests for the segregating_sites method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.segregating_sites
class TestY1(LowLevelTestCase, OneWaySampleStatsMixin):
"""
    Tests for the Y1 method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y1
class TestAlleleFrequencySpectrum(LowLevelTestCase, OneWaySampleStatsMixin):
"""
    Tests for the allele_frequency_spectrum method.
"""
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.allele_frequency_spectrum
def test_basic_example(self):
ts = self.get_example_tree_sequence()
n = ts.get_num_samples()
result = ts.allele_frequency_spectrum(
[n], ts.get_samples(), [0, ts.get_sequence_length()]
)
assert result.shape == (1, n + 1)
result = ts.allele_frequency_spectrum(
[n], ts.get_samples(), [0, ts.get_sequence_length()], polarised=True
)
assert result.shape == (1, n + 1)
def test_output_dims(self):
ts = self.get_example_tree_sequence()
samples = ts.get_samples()
L = ts.get_sequence_length()
n = len(samples)
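        # The joint AFS has shape (num_windows,) + tuple(size + 1 for each
        # sample set): one bin for every allele count from 0 to the set size.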
for mode in ["site", "branch"]:
for s in [[n], [n - 2, 2], [n - 4, 2, 2], [1] * n]:
s = np.array(s, dtype=np.uint32)
windows = [0, L]
for windows in [[0, L], [0, L / 2, L], np.linspace(0, L, num=10)]:
jafs = ts.allele_frequency_spectrum(
s, samples, windows, mode=mode, polarised=True
)
assert jafs.shape == tuple([len(windows) - 1] + list(s + 1))
jafs = ts.allele_frequency_spectrum(
s, samples, windows, mode=mode, polarised=False
)
assert jafs.shape == tuple([len(windows) - 1] + list(s + 1))
def test_node_mode_not_supported(self):
ts = self.get_example_tree_sequence()
with pytest.raises(_tskit.LibraryError):
ts.allele_frequency_spectrum(
[ts.get_num_samples()],
ts.get_samples(),
[0, ts.get_sequence_length()],
mode="node",
)
class TwoWaySampleStatsMixin(SampleSetMixin):
"""
Tests for the two way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [2, ts.get_num_samples() - 2],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[2, ts.get_num_samples() - 2],
ts.get_samples(),
[[0, 1]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 2, n - 4], samples, [[0, 1]], windows, mode=mode)
assert div.shape == (1, 1)
div = method([2, 2, n - 4], samples, [[0, 1], [1, 2]], windows, mode=mode)
assert div.shape == (1, 2)
div = method(
[2, 2, n - 4], samples, [[0, 1], [1, 2], [0, 1]], windows, mode=mode
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 2, n - 4], samples, [[0, 1]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method([2, 2, n - 4], samples, [[0, 1], [1, 2]], windows, mode=mode)
assert div.shape == (1, N, 2)
div = method(
[2, 2, n - 4], samples, [[0, 1], [1, 2], [0, 1]], windows, mode=mode
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 2, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]]]:
with pytest.raises(ValueError):
f(bad_dim)
class ThreeWaySampleStatsMixin(SampleSetMixin):
"""
    Tests for the three way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [1, 1, ts.get_num_samples() - 2],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1, 2]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[1, 1, ts.get_num_samples() - 2],
ts.get_samples(),
[[0, 1, 2]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 2, n - 4], samples, [[0, 1, 2]], windows, mode=mode)
assert div.shape == (1, 1)
div = method(
[1, 1, 2, n - 4], samples, [[0, 1, 2], [1, 2, 3]], windows, mode=mode
)
assert div.shape == (1, 2)
div = method(
[1, 1, 2, n - 4],
samples,
[[0, 1, 2], [1, 2, 3], [0, 1, 2]],
windows,
mode=mode,
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 2, n - 4], samples, [[0, 1, 2]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method(
[1, 1, 2, n - 4], samples, [[0, 1, 2], [1, 2, 3]], windows, mode=mode
)
assert div.shape == (1, N, 2)
div = method(
[1, 1, 2, n - 4],
samples,
[[0, 1, 2], [1, 2, 3], [0, 1, 2]],
windows,
mode=mode,
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 2, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]], [(0, 1)], [(0, 1, 2, 3)]]:
with pytest.raises(ValueError):
f(bad_dim)
class FourWaySampleStatsMixin(SampleSetMixin):
"""
Tests for the four way sample stats.
"""
def get_example(self):
ts, method = self.get_method()
params = {
"sample_set_sizes": [1, 1, 1, ts.get_num_samples() - 3],
"sample_sets": ts.get_samples(),
"indexes": [[0, 1, 2, 3]],
"windows": [0, ts.get_sequence_length()],
}
return ts, method, params
def test_basic_example(self):
ts, method = self.get_method()
div = method(
[1, 1, 1, ts.get_num_samples() - 3],
ts.get_samples(),
[[0, 1, 2, 3]],
windows=[0, ts.get_sequence_length()],
)
assert div.shape == (1, 1)
def test_output_dims(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
for mode in ["site", "branch"]:
div = method([2, 1, 1, n - 4], samples, [[0, 1, 2, 3]], windows, mode=mode)
assert div.shape == (1, 1)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4]],
windows,
mode=mode,
)
assert div.shape == (1, 2)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4], [0, 1, 2, 4]],
windows,
mode=mode,
)
assert div.shape == (1, 3)
N = ts.get_num_nodes()
mode = "node"
div = method([2, 1, 1, n - 4], samples, [[0, 1, 2, 3]], windows, mode=mode)
assert div.shape == (1, N, 1)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4]],
windows,
mode=mode,
)
assert div.shape == (1, N, 2)
div = method(
[1, 1, 1, 1, n - 4],
samples,
[[0, 1, 2, 3], [1, 2, 3, 4], [0, 1, 2, 4]],
windows,
mode=mode,
)
assert div.shape == (1, N, 3)
def test_set_index_errors(self):
ts, method = self.get_method()
samples = ts.get_samples()
windows = [0, ts.get_sequence_length()]
n = len(samples)
def f(indexes):
method([2, 1, 1, n - 4], samples, indexes, windows)
for bad_array in ["wer", {}, [[[], []], [[], []]]]:
with pytest.raises(ValueError):
f(bad_array)
for bad_dim in [[[]], [[1], [1]], [(0, 1)], [(0, 1, 2, 3, 4)]]:
with pytest.raises(ValueError):
f(bad_dim)
class TestDivergence(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.divergence
class TestY2(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y2
class Testf2(LowLevelTestCase, TwoWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f2
class TestY3(LowLevelTestCase, ThreeWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.Y3
class Testf3(LowLevelTestCase, ThreeWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f3
class Testf4(LowLevelTestCase, FourWaySampleStatsMixin):
def get_method(self):
ts = self.get_example_tree_sequence()
return ts, ts.f4
class TestGeneralStatsInterface(LowLevelTestCase, StatsInterfaceMixin):
"""
Tests for the general stats interface.
"""
def get_example(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
params = {
"weights": W,
"summary_func": lambda x: np.cumsum(x),
"output_dim": 1,
"windows": ts.get_breakpoints(),
}
return ts, ts.general_stat, params
def test_basic_example(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
sigma = ts.general_stat(
W, lambda x: np.cumsum(x), 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
def test_non_numpy_return(self):
ts = self.get_example_tree_sequence()
W = np.ones((ts.get_num_samples(), 3))
sigma = ts.general_stat(
W, lambda x: [sum(x)], 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
sigma = ts.general_stat(
W, lambda x: [2, 2], 2, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 2)
def test_complicated_numpy_function(self):
ts = self.get_example_tree_sequence(sample_size=20, length=30, random_seed=325)
W = np.zeros((ts.get_num_samples(), 4))
def f(x):
y = np.sum(x * x), np.prod(x + np.arange(x.shape[0]))
return y
sigma = ts.general_stat(W, f, 2, ts.get_breakpoints(), mode="branch")
assert sigma.shape == (ts.get_num_trees(), 2)
def test_input_dims(self):
ts = self.get_example_tree_sequence()
for k in range(1, 20):
W = np.zeros((ts.get_num_samples(), k))
sigma = ts.general_stat(
W, lambda x: np.cumsum(x), k, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), k)
sigma = ts.general_stat(
W, lambda x: [np.sum(x)], 1, ts.get_breakpoints(), mode="branch"
)
assert sigma.shape == (ts.get_num_trees(), 1)
def test_W_errors(self):
ts = self.get_example_tree_sequence()
n = ts.get_num_samples()
for bad_array in [[], [0, 1], [[[[]], [[]]]], np.zeros((10, 3, 4))]:
with pytest.raises(ValueError):
ts.general_stat(bad_array, lambda x: x, 1, ts.get_breakpoints())
for bad_size in [n - 1, n + 1, 0]:
W = np.zeros((bad_size, 1))
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: x, 1, ts.get_breakpoints())
def test_summary_func_errors(self):
ts = self.get_example_tree_sequence()
W = np.zeros((ts.get_num_samples(), 1))
for bad_type in ["sdf", 1, {}]:
with pytest.raises(TypeError):
ts.general_stat(W, bad_type, 1, ts.get_breakpoints())
# Wrong numbers of arguments to f
with pytest.raises(TypeError):
ts.general_stat(W, lambda: 0, 1, ts.get_breakpoints())
with pytest.raises(TypeError):
ts.general_stat(W, lambda x, y: None, 1, ts.get_breakpoints())
# Exceptions within f are correctly raised.
for exception in [ValueError, TypeError]:
def f(x):
raise exception("test")
with pytest.raises(exception):
ts.general_stat(W, f, 1, ts.get_breakpoints())
# Wrong output dimensions
for bad_array in [[1, 1], range(10)]:
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: bad_array, 1, ts.get_breakpoints())
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: [1], 2, ts.get_breakpoints())
# Bad arrays returned from f
for bad_array in [["sdf"], 0, "w4", None]:
with pytest.raises(ValueError):
ts.general_stat(W, lambda x: bad_array, 1, ts.get_breakpoints())
class TestTreeDiffIterator(LowLevelTestCase):
"""
Tests for the low-level tree diff iterator.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.TreeDiffIterator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.TreeDiffIterator()
with pytest.raises(TypeError):
_tskit.TreeDiffIterator(None)
ts = self.get_example_tree_sequence()
before = list(_tskit.TreeDiffIterator(ts))
iterator = _tskit.TreeDiffIterator(ts)
del ts
# We should keep a reference to the tree sequence.
after = list(iterator)
assert before == after
def test_iterator(self):
ts = self.get_example_tree_sequence()
self.verify_iterator(_tskit.TreeDiffIterator(ts))
class TestVariantGenerator(LowLevelTestCase):
"""
Tests for the VariantGenerator class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.VariantGenerator()
with pytest.raises(TypeError):
_tskit.VariantGenerator(None)
ts = self.get_example_tree_sequence()
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts, samples={})
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, impute_missing_data=None)
with pytest.raises(_tskit.LibraryError):
_tskit.VariantGenerator(ts, samples=[-1, 2])
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, alleles=1234)
def test_alleles(self):
ts = self.get_example_tree_sequence()
for bad_type in [["a", "b"], "sdf", 234]:
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=bad_type)
with pytest.raises(ValueError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=tuple())
for bad_allele_type in [None, 0, b"x", []]:
with pytest.raises(TypeError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=(bad_allele_type,))
too_many_alleles = tuple(str(j) for j in range(128))
with pytest.raises(_tskit.LibraryError):
_tskit.VariantGenerator(ts, samples=[1, 2], alleles=too_many_alleles)
def test_iterator(self):
ts = self.get_example_tree_sequence()
self.verify_iterator(_tskit.VariantGenerator(ts))
def test_missing_data(self):
tables = _tskit.TableCollection(1)
tables.nodes.add_row(flags=1, time=0)
tables.nodes.add_row(flags=1, time=0)
tables.sites.add_row(0.1, "A")
tables.build_index()
ts = _tskit.TreeSequence(0)
ts.load_tables(tables)
variant = list(_tskit.VariantGenerator(ts))[0]
_, genotypes, alleles = variant
assert np.all(genotypes == -1)
assert alleles == ("A", None)
class TestLdCalculator(LowLevelTestCase):
"""
Tests for the LdCalculator class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.LdCalculator(ts)
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.LdCalculator()
with pytest.raises(TypeError):
_tskit.LdCalculator(None)
def test_get_r2(self):
ts = self.get_example_tree_sequence()
calc = _tskit.LdCalculator(ts)
n = ts.get_num_sites()
for bad_id in [-1, n, n + 1]:
with pytest.raises(_tskit.LibraryError):
calc.get_r2(0, bad_id)
with pytest.raises(_tskit.LibraryError):
calc.get_r2(bad_id, 0)
def test_get_r2_array(self):
ts = self.get_example_tree_sequence()
calc = _tskit.LdCalculator(ts)
with pytest.raises(TypeError):
calc.get_r2_array()
with pytest.raises(TypeError):
calc.get_r2_array(None)
        # Doesn't support the buffer protocol, so raises TypeError
with pytest.raises(TypeError):
calc.get_r2_array(None, 0)
n = ts.get_num_sites()
assert n > 2
with pytest.raises(BufferError):
calc.get_r2_array(bytes(100), 0)
buff = bytearray(1024)
with pytest.raises(ValueError):
calc.get_r2_array(buff, 0, max_distance=-1)
with pytest.raises(ValueError):
calc.get_r2_array(buff, 0, direction=1000)
# TODO this API is poor, we should explicitly catch these negative
# size errors.
for bad_max_mutations in [-2, -3]:
with pytest.raises(BufferError):
calc.get_r2_array(buff, 0, max_mutations=bad_max_mutations)
for bad_start_pos in [-1, n, n + 1]:
with pytest.raises(_tskit.LibraryError):
calc.get_r2_array(buff, bad_start_pos)
class TestLsHmm(LowLevelTestCase):
"""
Tests for the LsHmm class.
"""
def test_uninitialised_tree_sequence(self):
ts = _tskit.TreeSequence()
with pytest.raises(ValueError):
_tskit.LsHmm(ts, None, None)
def test_constructor(self):
ts = self.get_example_tree_sequence()
with pytest.raises(TypeError):
_tskit.LsHmm()
with pytest.raises(TypeError):
_tskit.LsHmm(None)
values = np.zeros(ts.get_num_sites())
for bad_array in ["asdf", [[], []], None]:
with pytest.raises(ValueError):
_tskit.LsHmm(ts, bad_array, values)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, values, bad_array)
def test_bad_rate_arrays(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
assert m > 0
values = np.zeros(m)
for bad_size in [0, m - 1, m + 1, m + 2]:
bad_array = np.zeros(bad_size)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, bad_array, values)
with pytest.raises(ValueError):
_tskit.LsHmm(ts, values, bad_array)
def test_haplotype_input(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
fm = _tskit.CompressedMatrix(ts)
vm = _tskit.ViterbiMatrix(ts)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m), np.zeros(m))
for bad_size in [0, m - 1, m + 1, m + 2]:
bad_array = np.zeros(bad_size, dtype=np.int8)
with pytest.raises(ValueError):
ls_hmm.forward_matrix(bad_array, fm)
with pytest.raises(ValueError):
ls_hmm.viterbi_matrix(bad_array, vm)
for bad_array in [[0.002], [[], []], None]:
with pytest.raises(ValueError):
ls_hmm.forward_matrix(bad_array, fm)
with pytest.raises(ValueError):
ls_hmm.viterbi_matrix(bad_array, vm)
def test_output_type_errors(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
h = np.zeros(m, dtype=np.int8)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m), np.zeros(m))
for bad_type in [ls_hmm, None, m, []]:
with pytest.raises(TypeError):
ls_hmm.forward_matrix(h, bad_type)
with pytest.raises(TypeError):
ls_hmm.viterbi_matrix(h, bad_type)
other_ts = self.get_example_tree_sequence()
output = _tskit.CompressedMatrix(other_ts)
with pytest.raises(_tskit.LibraryError):
ls_hmm.forward_matrix(h, output)
output = _tskit.ViterbiMatrix(other_ts)
with pytest.raises(_tskit.LibraryError):
ls_hmm.viterbi_matrix(h, output)
def test_empty_forward_matrix(self):
for mu in [0, 1]:
ts = self.get_example_tree_sequence(mutation_rate=mu)
m = ts.get_num_sites()
fm = _tskit.CompressedMatrix(ts)
assert fm.num_sites == m
assert np.array_equal(np.zeros(m), fm.normalisation_factor)
assert np.array_equal(np.zeros(m, dtype=np.uint32), fm.num_transitions)
F = fm.decode()
assert np.all(F >= 0)
for j in range(m):
assert fm.get_site(j) == []
def test_empty_viterbi_matrix(self):
for mu in [0, 1]:
ts = self.get_example_tree_sequence(mutation_rate=mu)
m = ts.get_num_sites()
vm = _tskit.ViterbiMatrix(ts)
assert vm.num_sites == m
# TODO we should have the same semantics for 0 sites
if m == 0:
h = vm.traceback()
assert len(h) == 0
else:
with pytest.raises(_tskit.LibraryError):
vm.traceback()
def verify_compressed_matrix(self, ts, output):
S = output.normalisation_factor
N = output.num_transitions
assert np.all(0 < S)
assert np.all(S < 1)
assert np.all(N > 0)
F = output.decode()
assert F.shape == (ts.get_num_sites(), ts.get_num_samples())
assert np.all(F >= 0)
m = ts.get_num_sites()
for j in range(m):
site_list = output.get_site(j)
assert len(site_list) == N[j]
for item in site_list:
assert len(item) == 2
node, value = item
assert 0 <= node < ts.get_num_nodes()
assert 0 <= value <= 1
for site in [m, m + 1, 2 * m]:
with pytest.raises(ValueError):
output.get_site(site)
def test_forward_matrix(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
output = _tskit.CompressedMatrix(ts)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m) + 0.1, np.zeros(m) + 0.1)
rv = ls_hmm.forward_matrix([0 for _ in range(m)], output)
assert rv is None
self.verify_compressed_matrix(ts, output)
def test_viterbi_matrix(self):
ts = self.get_example_tree_sequence()
m = ts.get_num_sites()
output = _tskit.ViterbiMatrix(ts)
ls_hmm = _tskit.LsHmm(ts, np.zeros(m) + 0.1, np.zeros(m) + 0.1)
rv = ls_hmm.viterbi_matrix([0 for _ in range(m)], output)
assert rv is None
self.verify_compressed_matrix(ts, output)
h = output.traceback()
assert isinstance(h, np.ndarray)
class TestTree(LowLevelTestCase):
"""
Tests on the low-level tree interface.
"""
ARRAY_NAMES = ["parent", "left_child", "right_child", "left_sib", "right_sib"]
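    # These quintuply linked tree arrays are exposed as read-only numpy views
    # onto the tree's internal memory (see the test_array_* tests below).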
def test_options(self):
ts = self.get_example_tree_sequence()
st = _tskit.Tree(ts)
assert st.get_options() == 0
all_options = [
0,
_tskit.NO_SAMPLE_COUNTS,
_tskit.SAMPLE_LISTS,
_tskit.NO_SAMPLE_COUNTS | _tskit.SAMPLE_LISTS,
]
for options in all_options:
tree = _tskit.Tree(ts, options=options)
copy = tree.copy()
for st in [tree, copy]:
assert st.get_options() == options
assert st.get_num_samples(0) == 1
if options & _tskit.NO_SAMPLE_COUNTS:
# We should still be able to count the samples, just inefficiently.
assert st.get_num_samples(0) == 1
with pytest.raises(_tskit.LibraryError):
st.get_num_tracked_samples(0)
else:
assert st.get_num_tracked_samples(0) == 0
if options & _tskit.SAMPLE_LISTS:
assert 0 == st.get_left_sample(0)
assert 0 == st.get_right_sample(0)
else:
with pytest.raises(ValueError):
st.get_left_sample(0)
with pytest.raises(ValueError):
st.get_right_sample(0)
with pytest.raises(ValueError):
st.get_next_sample(0)
def test_site_errors(self):
ts = self.get_example_tree_sequence()
for bad_index in [-1, ts.get_num_sites(), ts.get_num_sites() + 1]:
with pytest.raises(IndexError):
ts.get_site(bad_index)
def test_mutation_errors(self):
ts = self.get_example_tree_sequence()
for bad_index in [-1, ts.get_num_mutations(), ts.get_num_mutations() + 1]:
with pytest.raises(IndexError):
ts.get_mutation(bad_index)
def test_individual_errors(self):
ts = self.get_example_tree_sequence()
for bad_index in [-1, ts.get_num_individuals(), ts.get_num_individuals() + 1]:
with pytest.raises(IndexError):
ts.get_individual(bad_index)
def test_population_errors(self):
ts = self.get_example_tree_sequence()
for bad_index in [-1, ts.get_num_populations(), ts.get_num_populations() + 1]:
with pytest.raises(IndexError):
ts.get_population(bad_index)
def test_provenance_errors(self):
ts = self.get_example_tree_sequence()
for bad_index in [-1, ts.get_num_provenances(), ts.get_num_provenances() + 1]:
with pytest.raises(IndexError):
ts.get_provenance(bad_index)
def test_sites(self):
for ts in self.get_example_tree_sequences():
st = _tskit.Tree(ts)
all_sites = [ts.get_site(j) for j in range(ts.get_num_sites())]
all_tree_sites = []
j = 0
mutation_id = 0
while st.next():
tree_sites = st.get_sites()
assert st.get_num_sites() == len(tree_sites)
all_tree_sites.extend(tree_sites)
for (
position,
_ancestral_state,
mutations,
index,
metadata,
) in tree_sites:
assert st.get_left() <= position < st.get_right()
assert index == j
assert metadata == b""
for mut_id in mutations:
(
site,
node,
derived_state,
parent,
metadata,
time,
) = ts.get_mutation(mut_id)
assert site == index
assert mutation_id == mut_id
assert st.get_parent(node) != _tskit.NULL
assert metadata == b""
mutation_id += 1
j += 1
assert all_tree_sites == all_sites
def test_root_threshold_errors(self):
ts = self.get_example_tree_sequence()
tree = _tskit.Tree(ts)
for bad_type in ["", "x", {}]:
with pytest.raises(TypeError):
tree.set_root_threshold(bad_type)
with pytest.raises(_tskit.LibraryError):
tree.set_root_threshold(0)
tree.set_root_threshold(2)
# Setting when not in the null state raises an error
tree.next()
with pytest.raises(_tskit.LibraryError):
tree.set_root_threshold(2)
def test_root_threshold(self):
for ts in self.get_example_tree_sequences():
tree = _tskit.Tree(ts)
for root_threshold in [1, 2, ts.get_num_samples() * 2]:
tree.set_root_threshold(root_threshold)
assert tree.get_root_threshold() == root_threshold
while tree.next():
assert tree.get_root_threshold() == root_threshold
with pytest.raises(_tskit.LibraryError):
tree.set_root_threshold(2)
assert tree.get_root_threshold() == root_threshold
def test_constructor(self):
with pytest.raises(TypeError):
_tskit.Tree()
for bad_type in ["", {}, [], None, 0]:
with pytest.raises(TypeError):
_tskit.Tree(bad_type)
ts = self.get_example_tree_sequence()
for bad_type in ["", {}, True, 1, None]:
with pytest.raises(TypeError):
_tskit.Tree(ts, tracked_samples=bad_type)
for bad_type in ["", {}, None, []]:
with pytest.raises(TypeError):
_tskit.Tree(ts, options=bad_type)
for ts in self.get_example_tree_sequences():
st = _tskit.Tree(ts)
assert st.get_num_nodes() == ts.get_num_nodes()
# An uninitialised tree should always be zero.
assert st.get_left_root() == 0
assert st.get_left() == 0
assert st.get_right() == 0
for j in range(ts.get_num_samples()):
assert st.get_parent(j) == _tskit.NULL
assert st.get_children(j) == tuple()
assert st.get_time(j) == 0
def test_bad_tracked_samples(self):
ts = self.get_example_tree_sequence()
options = 0
for bad_type in ["", {}, [], None]:
with pytest.raises(TypeError):
_tskit.Tree(ts, options=options, tracked_samples=[bad_type])
with pytest.raises(TypeError):
_tskit.Tree(
ts,
options=options,
tracked_samples=[1, bad_type],
)
for bad_sample in [10 ** 6, -1e6]:
with pytest.raises(ValueError):
# Implicit conversion to integers using __int__ is deprecated
with pytest.deprecated_call():
_tskit.Tree(
ts,
options=options,
tracked_samples=[bad_sample],
)
with pytest.raises(ValueError):
with pytest.deprecated_call():
_tskit.Tree(
ts,
options=options,
tracked_samples=[1, bad_sample],
)
with pytest.raises(ValueError):
with pytest.deprecated_call():
_tskit.Tree(ts, tracked_samples=[1, bad_sample, 1])
def test_while_loop_semantics(self):
for ts in self.get_example_tree_sequences():
tree = _tskit.Tree(ts)
# Any mixture of prev and next is OK and gives a valid iteration.
for _ in range(2):
j = 0
while tree.next():
assert tree.get_index() == j
j += 1
assert j == ts.get_num_trees()
for _ in range(2):
j = ts.get_num_trees()
while tree.prev():
assert tree.get_index() == j - 1
j -= 1
assert j == 0
j = 0
while tree.next():
assert tree.get_index() == j
j += 1
assert j == ts.get_num_trees()
def test_count_all_samples(self):
for ts in self.get_example_tree_sequences():
self.verify_iterator(_tskit.TreeDiffIterator(ts))
st = _tskit.Tree(ts)
            # Before the tree is initialised, sample nodes should count
            # themselves (1) and every other node should have 0 samples.
for j in range(st.get_num_nodes()):
count = 1 if j < ts.get_num_samples() else 0
assert st.get_num_samples(j) == count
assert st.get_num_tracked_samples(j) == 0
while st.next():
nu = get_sample_counts(ts, st)
nu_prime = [st.get_num_samples(j) for j in range(st.get_num_nodes())]
assert nu == nu_prime
# For tracked samples, this should be all zeros.
nu = [st.get_num_tracked_samples(j) for j in range(st.get_num_nodes())]
assert nu == list([0 for _ in nu])
def test_count_tracked_samples(self):
# Ensure that there are some non-binary nodes.
non_binary = False
for ts in self.get_example_tree_sequences():
st = _tskit.Tree(ts)
while st.next():
for u in range(ts.get_num_nodes()):
if len(st.get_children(u)) > 1:
non_binary = True
samples = [j for j in range(ts.get_num_samples())]
powerset = itertools.chain.from_iterable(
itertools.combinations(samples, r) for r in range(len(samples) + 1)
)
max_sets = 100
for _, subset in zip(range(max_sets), map(list, powerset)):
# Ordering shouldn't make any difference.
random.shuffle(subset)
st = _tskit.Tree(ts, tracked_samples=subset)
while st.next():
nu = get_tracked_sample_counts(st, subset)
nu_prime = [
st.get_num_tracked_samples(j) for j in range(st.get_num_nodes())
]
assert nu == nu_prime
# Passing duplicated values should raise an error
sample = 1
for j in range(2, 20):
tracked_samples = [sample for _ in range(j)]
with pytest.raises(_tskit.LibraryError):
_tskit.Tree(
ts,
tracked_samples=tracked_samples,
)
assert non_binary
def test_bounds_checking(self):
for ts in self.get_example_tree_sequences():
n = ts.get_num_nodes()
st = _tskit.Tree(ts, options=_tskit.SAMPLE_LISTS)
for v in [-100, -1, n + 1, n + 100, n * 100]:
with pytest.raises(ValueError):
st.get_parent(v)
with pytest.raises(ValueError):
st.get_children(v)
with pytest.raises(ValueError):
st.get_time(v)
with pytest.raises(ValueError):
st.get_left_sample(v)
with pytest.raises(ValueError):
st.get_right_sample(v)
with pytest.raises(ValueError):
st.is_descendant(v, 0)
with pytest.raises(ValueError):
st.is_descendant(0, v)
with pytest.raises(ValueError):
st.depth(v)
n = ts.get_num_samples()
for v in [-100, -1, n + 1, n + 100, n * 100]:
with pytest.raises(ValueError):
st.get_next_sample(v)
def test_mrca_interface(self):
for ts in self.get_example_tree_sequences():
num_nodes = ts.get_num_nodes()
st = _tskit.Tree(ts)
for v in [num_nodes, 10 ** 6, _tskit.NULL]:
with pytest.raises(ValueError):
st.get_mrca(v, v)
with pytest.raises(ValueError):
st.get_mrca(v, 1)
with pytest.raises(ValueError):
st.get_mrca(1, v)
# All the mrcas for an uninitialised tree should be _tskit.NULL
for u, v in itertools.combinations(range(num_nodes), 2):
assert st.get_mrca(u, v) == _tskit.NULL
def test_newick_precision(self):
def get_times(tree):
"""
Returns the time strings from the specified newick tree.
"""
ret = []
current_time = None
for c in tree:
if c == ":":
current_time = ""
elif c in [",", ")"]:
ret.append(current_time)
current_time = None
elif current_time is not None:
current_time += c
return ret
ts = self.get_example_tree_sequence()
st = _tskit.Tree(ts)
while st.next():
with pytest.raises(ValueError):
st.get_newick(root=0, precision=-1)
with pytest.raises(ValueError):
st.get_newick(root=0, precision=17)
with pytest.raises(ValueError):
st.get_newick(root=0, precision=100)
for precision in range(17):
tree = st.get_newick(
root=st.get_left_root(), precision=precision
).decode()
times = get_times(tree)
assert len(times) > ts.get_num_samples()
for t in times:
if precision == 0:
assert "." not in t
else:
point = t.find(".")
assert precision == len(t) - point - 1
def test_cleared_tree(self):
ts = self.get_example_tree_sequence()
samples = ts.get_samples()
def check_tree(tree):
assert tree.get_index() == -1
assert tree.get_left_root() == samples[0]
assert tree.get_mrca(0, 1) == _tskit.NULL
for u in range(ts.get_num_nodes()):
assert tree.get_parent(u) == _tskit.NULL
assert tree.get_left_child(u) == _tskit.NULL
assert tree.get_right_child(u) == _tskit.NULL
tree = _tskit.Tree(ts)
check_tree(tree)
while tree.next():
pass
check_tree(tree)
while tree.prev():
pass
check_tree(tree)
def test_newick_interface(self):
ts = self.get_example_tree_sequence()
st = _tskit.Tree(ts)
# TODO this will break when we correctly handle multiple roots.
assert st.get_newick(0) == b"1;"
for bad_type in [None, "", [], {}]:
with pytest.raises(TypeError):
st.get_newick(precision=bad_type)
with pytest.raises(TypeError):
st.get_newick(ts, buffer_size=bad_type)
while st.next():
u = st.get_left_root()
newick = st.get_newick(u)
assert newick.endswith(b";")
with pytest.raises(ValueError):
st.get_newick(u, buffer_size=-1)
with pytest.raises(_tskit.LibraryError):
st.get_newick(u, buffer_size=1)
def test_index(self):
for ts in self.get_example_tree_sequences():
st = _tskit.Tree(ts)
index = 0
while st.next():
assert index == st.get_index()
index += 1
def test_bad_mutations(self):
ts = self.get_example_tree_sequence()
tables = _tskit.TableCollection()
ts.dump_tables(tables)
def f(mutations):
position = []
node = []
site = []
ancestral_state = []
ancestral_state_offset = [0]
derived_state = []
derived_state_offset = [0]
for j, (p, n) in enumerate(mutations):
site.append(j)
position.append(p)
ancestral_state.append("0")
ancestral_state_offset.append(ancestral_state_offset[-1] + 1)
derived_state.append("1")
derived_state_offset.append(derived_state_offset[-1] + 1)
node.append(n)
tables.sites.set_columns(
dict(
position=position,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset,
metadata=None,
metadata_offset=None,
)
)
tables.mutations.set_columns(
dict(
site=site,
node=node,
derived_state=derived_state,
derived_state_offset=derived_state_offset,
parent=None,
metadata=None,
metadata_offset=None,
)
)
ts2 = _tskit.TreeSequence()
ts2.load_tables(tables)
with pytest.raises(_tskit.LibraryError):
f([(0.1, -1)])
length = ts.get_sequence_length()
u = ts.get_num_nodes()
for bad_node in [u, u + 1, 2 * u]:
with pytest.raises(_tskit.LibraryError):
f([(0.1, bad_node)])
for bad_pos in [-1, length, length + 1]:
with pytest.raises(_tskit.LibraryError):
f([(bad_pos, 0)])
def test_sample_list(self):
options = _tskit.SAMPLE_LISTS
# Note: we're assuming that samples are 0-n here.
for ts in self.get_example_tree_sequences():
t = _tskit.Tree(ts, options=options)
while t.next():
# All sample nodes should have themselves.
for j in range(ts.get_num_samples()):
assert t.get_left_sample(j) == j
assert t.get_right_sample(j) == j
                # Nodes not in the current tree should have null sample indexes.
for j in range(t.get_num_nodes()):
if (
t.get_parent(j) == _tskit.NULL
and t.get_left_child(j) == _tskit.NULL
):
assert t.get_left_sample(j) == _tskit.NULL
assert t.get_right_sample(j) == _tskit.NULL
# The roots should have all samples.
u = t.get_left_root()
samples = []
while u != _tskit.NULL:
sample = t.get_left_sample(u)
end = t.get_right_sample(u)
while True:
samples.append(sample)
if sample == end:
break
sample = t.get_next_sample(sample)
u = t.get_right_sib(u)
assert sorted(samples) == list(range(ts.get_num_samples()))
def test_equality(self):
last_ts = None
for ts in self.get_example_tree_sequences():
t1 = _tskit.Tree(ts)
t2 = _tskit.Tree(ts)
assert t1.equals(t2)
assert t2.equals(t1)
while True:
assert t1.equals(t2)
assert t2.equals(t1)
n1 = t1.next()
assert not t1.equals(t2)
assert not t2.equals(t1)
n2 = t2.next()
assert n1 == n2
if not n1:
break
if last_ts is not None:
t2 = _tskit.Tree(last_ts)
assert not t1.equals(t2)
assert not t2.equals(t1)
last_ts = ts
def test_kc_distance_errors(self):
ts1 = self.get_example_tree_sequence(10)
t1 = _tskit.Tree(ts1, options=_tskit.SAMPLE_LISTS)
t1.first()
with pytest.raises(TypeError):
t1.get_kc_distance()
with pytest.raises(TypeError):
t1.get_kc_distance(t1)
for bad_tree in [None, "tree", 0]:
with pytest.raises(TypeError):
t1.get_kc_distance(bad_tree, lambda_=0)
for bad_value in ["tree", [], None]:
with pytest.raises(TypeError):
t1.get_kc_distance(t1, lambda_=bad_value)
t2 = _tskit.Tree(ts1, options=_tskit.SAMPLE_LISTS)
# If we don't seek to a specific tree, it has multiple roots (i.e., it's
# in the null state). This fails because we don't accept multiple roots.
self.verify_kc_library_error(t2, t2)
# Different numbers of samples fail.
ts2 = self.get_example_tree_sequence(11)
t2 = _tskit.Tree(ts2, options=_tskit.SAMPLE_LISTS)
t2.first()
self.verify_kc_library_error(t1, t2)
# Error when tree not initialized with sample lists
ts2 = self.get_example_tree_sequence(10)
t2 = _tskit.Tree(ts2)
t2.first()
self.verify_kc_library_error(t1, t2)
# Unary nodes cause errors.
tables = _tskit.TableCollection(1.0)
tables.nodes.add_row(flags=1)
tables.nodes.add_row(flags=1, time=1)
tables.edges.add_row(0, 1, 1, 0)
tables.build_index()
ts = _tskit.TreeSequence()
ts.load_tables(tables)
t1 = _tskit.Tree(ts, options=_tskit.SAMPLE_LISTS)
t1.first()
self.verify_kc_library_error(t1, t1)
def verify_kc_library_error(self, t1, t2):
with pytest.raises(_tskit.LibraryError):
t1.get_kc_distance(t2, 0)
def test_kc_distance(self):
ts1 = self.get_example_tree_sequence(10, random_seed=123456)
t1 = _tskit.Tree(ts1, options=_tskit.SAMPLE_LISTS)
t1.first()
ts2 = self.get_example_tree_sequence(10, random_seed=1234)
t2 = _tskit.Tree(ts2, options=_tskit.SAMPLE_LISTS)
t2.first()
for lambda_ in [-1, 0, 1, 1000, -1e300]:
x1 = t1.get_kc_distance(t2, lambda_)
x2 = t2.get_kc_distance(t1, lambda_)
assert x1 == x2
def test_copy(self):
for ts in self.get_example_tree_sequences():
t1 = _tskit.Tree(ts)
t2 = t1.copy()
assert t1.get_index() == t2.get_index()
assert t1 is not t2
while t1.next():
t2 = t1.copy()
assert t1.get_index() == t2.get_index()
def test_map_mutations_null(self):
ts = self.get_example_tree_sequence()
tree = _tskit.Tree(ts)
n = ts.get_num_samples()
genotypes = np.zeros(n, dtype=np.int8)
ancestral_state, transitions = tree.map_mutations(genotypes)
assert ancestral_state == 0
assert len(transitions) == 0
genotypes = np.arange(n, dtype=np.int8)
ancestral_state, transitions = tree.map_mutations(genotypes)
assert ancestral_state == 0
for j in range(n - 1):
assert transitions[j][0] == j + 1
assert transitions[j][1] == -1
assert transitions[j][2] == j + 1
def test_map_mutations(self):
ts = self.get_example_tree_sequence()
tree = _tskit.Tree(ts)
tree.next()
n = ts.get_num_samples()
genotypes = np.zeros(n, dtype=np.int8)
ancestral_state, transitions = tree.map_mutations(genotypes)
assert ancestral_state == 0
assert len(transitions) == 0
def test_map_mutations_fixed_ancestral_state(self):
ts = self.get_example_tree_sequence()
tree = _tskit.Tree(ts)
tree.next()
n = ts.get_num_samples()
genotypes = np.ones(n, dtype=np.int8)
ancestral_state, transitions = tree.map_mutations(genotypes, 0)
assert ancestral_state == 0
assert len(transitions) == 1
def test_map_mutations_errors(self):
ts = self.get_example_tree_sequence()
tree = _tskit.Tree(ts)
n = ts.get_num_samples()
genotypes = np.zeros(n, dtype=np.int8)
with pytest.raises(TypeError):
tree.map_mutations()
for bad_size in [0, 1, n - 1, n + 1]:
with pytest.raises(ValueError):
tree.map_mutations(np.zeros(bad_size, dtype=np.int8))
for bad_type in [None, {}, set()]:
with pytest.raises(TypeError):
tree.map_mutations([bad_type] * n)
for bad_type in [np.uint8, np.uint64, np.float32]:
with pytest.raises(TypeError):
                tree.map_mutations(np.zeros(n, dtype=bad_type))
genotypes = np.zeros(n, dtype=np.int8)
tree.map_mutations(genotypes)
for bad_value in [64, 65, 127, -2]:
genotypes[0] = bad_value
with pytest.raises(_tskit.LibraryError):
tree.map_mutations(genotypes)
genotypes = np.zeros(n, dtype=np.int8)
tree.map_mutations(genotypes)
for bad_type in ["d", []]:
with pytest.raises(TypeError):
tree.map_mutations(genotypes, bad_type)
for bad_state in [-2, -1, 127, 255]:
with pytest.raises(_tskit.LibraryError, match="Bad ancestral"):
tree.map_mutations(genotypes, bad_state)
@pytest.mark.parametrize("array", ARRAY_NAMES)
def test_array_read_only(self, array):
name = array + "_array"
ts1 = self.get_example_tree_sequence(10)
t1 = _tskit.Tree(ts1)
t1.first()
with pytest.raises(AttributeError, match="not writable"):
setattr(t1, name, None)
with pytest.raises(AttributeError, match="not writable"):
delattr(t1, name)
a = getattr(t1, name)
with pytest.raises(ValueError, match="assignment destination"):
a[:] = 0
with pytest.raises(ValueError, match="assignment destination"):
a[0] = 0
with pytest.raises(ValueError, match="cannot set WRITEABLE"):
a.setflags(write=True)
@pytest.mark.parametrize("array", ARRAY_NAMES)
def test_array_properties(self, array):
ts1 = self.get_example_tree_sequence(10)
t1 = _tskit.Tree(ts1)
a = getattr(t1, array + "_array")
t1.first()
a = getattr(t1, array + "_array")
assert a.dtype == np.int32
assert a.shape == (ts1.get_num_nodes(),)
assert a.base == t1
assert not a.flags.writeable
assert a.flags.aligned
assert a.flags.c_contiguous
assert not a.flags.owndata
b = getattr(t1, array + "_array")
assert a is not b
assert np.all(a == b)
a_copy = a.copy()
# This checks that the underlying pointer to memory is the same in
# both arrays.
assert a.__array_interface__ == b.__array_interface__
t1.next()
# NB! Because we are pointing to the underlying memory, the arrays
# will change as we iterate along the trees! This is a gotcha, but
# it's just something we have to document as it's a consequence of the
# zero copy semantics.
b = getattr(t1, array + "_array")
assert np.all(a == b)
assert np.any(a_copy != b)
@pytest.mark.parametrize("array", ARRAY_NAMES)
def test_array_lifetime(self, array):
ts1 = self.get_example_tree_sequence(10)
t1 = _tskit.Tree(ts1)
t1.first()
a1 = getattr(t1, array + "_array")
a2 = a1.copy()
assert a1 is not a2
del t1
# Do some memory operations
        a3 = np.ones(10 ** 6)
        # The copied data should still be intact after the tree is deallocated.
        assert np.all(a1 == a2)
# Authors: <NAME> <<EMAIL>>
# License: Apache 2.0
from PyNomaly import loop
import logging
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
from sklearn.datasets import load_iris
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_warns
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# flag to enable or disable NUMBA
NUMBA = False
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# fixtures
@pytest.fixture()
def X_n8() -> np.ndarray:
# Toy sample (the last two samples are outliers):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 2], [1, 2], [2, 1], [5, 3],
[-4, 2]])
return X
@pytest.fixture()
def X_n120() -> np.ndarray:
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
return X
@pytest.fixture()
def X_n140_outliers(X_n120) -> np.ndarray:
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X_n120, X_outliers]
return X
@pytest.fixture()
def X_n1000() -> np.ndarray:
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(1000, 2)
return X
def test_loop(X_n8) -> None:
# Test LocalOutlierProbability:
clf = loop.LocalOutlierProbability(X_n8, n_neighbors=5, use_numba=NUMBA)
score = clf.fit().local_outlier_probabilities
share_outlier = 2. / 8.
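    # Label a point an outlier (-1) when its outlier probability exceeds the
    # expected share of outliers, otherwise an inlier (1).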
predictions = [-1 if s > share_outlier else 1 for s in score]
assert_array_equal(predictions, 6 * [1] + 2 * [-1])
# Assert smallest outlier score is greater than largest inlier score:
assert_greater(np.min(score[-2:]), np.max(score[:-2]))
# Test the DataFrame functionality
X_df = pd.DataFrame(X_n8)
# Test LocalOutlierProbability:
clf = loop.LocalOutlierProbability(X_df, n_neighbors=5, use_numba=NUMBA)
score = clf.fit().local_outlier_probabilities
share_outlier = 2. / 8.
predictions = [-1 if s > share_outlier else 1 for s in score]
assert_array_equal(predictions, 6 * [1] + 2 * [-1])
# Assert smallest outlier score is greater than largest inlier score:
assert_greater(np.min(score[-2:]), np.max(score[:-2]))
def test_loop_performance(X_n120) -> None:
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X_n120, X_outliers]
X_labels = np.r_[
np.repeat(1, X_n120.shape[0]), np.repeat(-1, X_outliers.shape[0])]
# fit the model
clf = loop.LocalOutlierProbability(
X_test,
n_neighbors=X_test.shape[0] - 1,
# test the progress bar
progress_bar=True,
use_numba=NUMBA
)
# predict scores (the lower, the more normal)
score = clf.fit().local_outlier_probabilities
share_outlier = X_outliers.shape[0] / X_test.shape[0]
X_pred = [-1 if s > share_outlier else 1 for s in score]
# check that roc_auc is good
assert_greater(roc_auc_score(X_pred, X_labels), .98)
def test_input_nodata(X_n140_outliers) -> None:
with pytest.warns(UserWarning) as record:
# attempt to fit loop without data or a distance matrix
loop.LocalOutlierProbability(n_neighbors=X_n140_outliers.shape[0] - 1,
use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Data or a distance matrix must be provided."
def test_bad_input_argument(X_n140_outliers) -> None:
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a string input for n_neighbors
loop.LocalOutlierProbability(X_n140_outliers,
n_neighbors=str(
X_n140_outliers.shape[0] - 1),
use_numba=NUMBA
)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Argument 'n_neighbors' is not of type (<class 'int'>, " \
"<class 'numpy.integer'>)."
def test_neighbor_zero(X_n120) -> None:
clf = loop.LocalOutlierProbability(X_n120, n_neighbors=0, use_numba=NUMBA)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a 0 neighbor count
clf.fit()
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "n_neighbors must be greater than 0. Fit with 10 instead."
def test_input_distonly(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a distance matrix and no neighbor matrix
loop.LocalOutlierProbability(distance_matrix=d, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "A neighbor index matrix and distance matrix must both " \
"be provided when not using raw input data."
def test_input_neighboronly(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a neighbor matrix and no distance matrix
loop.LocalOutlierProbability(neighbor_matrix=idx, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Data or a distance matrix must be provided."
def test_input_too_many(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with data and a distance matrix
loop.LocalOutlierProbability(X_n120, distance_matrix=d,
neighbor_matrix=idx, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Only one of the following may be provided: data or a " \
"distance matrix (not both)."
def test_distance_neighbor_shape_mismatch(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
# generate distance and neighbor indices of a different shape
neigh_2 = NearestNeighbors(metric='euclidean')
neigh_2.fit(X_n120)
d_2, idx_2 = neigh.kneighbors(X_n120, n_neighbors=5, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a mismatch in shapes
loop.LocalOutlierProbability(
distance_matrix=d,
neighbor_matrix=idx_2,
n_neighbors=5,
use_numba=NUMBA
)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "The shape of the distance and neighbor " \
"index matrices must match."
def test_input_neighbor_mismatch(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=5, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a neighbor size mismatch
loop.LocalOutlierProbability(distance_matrix=d,
neighbor_matrix=idx,
n_neighbors=10,
use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "The shape of the distance or " \
"neighbor index matrix does not " \
"match the number of neighbors " \
"specified."
def test_loop_dist_matrix(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
# fit loop using data and distance matrix
clf1 = loop.LocalOutlierProbability(X_n120, use_numba=NUMBA)
clf2 = loop.LocalOutlierProbability(distance_matrix=d, neighbor_matrix=idx,
use_numba=NUMBA)
scores1 = clf1.fit().local_outlier_probabilities
scores2 = clf2.fit().local_outlier_probabilities
# compare the agreement between the results
assert_almost_equal(scores1, scores2, decimal=1)
def test_lambda_values(X_n140_outliers) -> None:
# Fit the model with different extent (lambda) values
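    # extent is the lambda of the LoOP model (roughly, how many standard
    # deviations are considered normal); larger values are more permissive,
    # so the mean outlier probability should decrease.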
clf1 = loop.LocalOutlierProbability(X_n140_outliers, extent=1,
use_numba=NUMBA)
clf2 = loop.LocalOutlierProbability(X_n140_outliers, extent=2,
use_numba=NUMBA)
clf3 = loop.LocalOutlierProbability(X_n140_outliers, extent=3,
use_numba=NUMBA)
# predict scores (the lower, the more normal)
score1 = clf1.fit().local_outlier_probabilities
score2 = clf2.fit().local_outlier_probabilities
score3 = clf3.fit().local_outlier_probabilities
# Get the mean of all the scores
score_mean1 = np.mean(score1)
score_mean2 = np.mean(score2)
score_mean3 = np.mean(score3)
    # check that the mean scores decrease as the extent increases
assert_greater(score_mean1, score_mean2)
assert_greater(score_mean2, score_mean3)
def test_parameters(X_n120) -> None:
# fit the model
clf = loop.LocalOutlierProbability(X_n120, use_numba=NUMBA).fit()
# check that the model has attributes post fit
assert (hasattr(clf, 'n_neighbors') and
clf.n_neighbors is not None)
assert (hasattr(clf, 'extent') and
clf.extent is not None)
assert (hasattr(clf, 'cluster_labels') and
clf._cluster_labels() is not None)
assert (hasattr(clf, 'prob_distances') and
clf.prob_distances is not None)
assert (hasattr(clf, 'prob_distances_ev') and
clf.prob_distances_ev is not None)
assert (hasattr(clf, 'norm_prob_local_outlier_factor') and
clf.norm_prob_local_outlier_factor is not None)
assert (hasattr(clf, 'local_outlier_probabilities') and
clf.local_outlier_probabilities is not None)
def test_n_neighbors() -> None:
X = iris.data
clf = loop.LocalOutlierProbability(X, n_neighbors=500,
use_numba=NUMBA).fit()
assert_equal(clf.n_neighbors, X.shape[0] - 1)
clf = loop.LocalOutlierProbability(X, n_neighbors=500, use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
assert_equal(clf.n_neighbors, X.shape[0] - 1)
def test_extent() -> None:
X = np.array([[1, 1], [1, 0]])
clf = loop.LocalOutlierProbability(X, n_neighbors=2, extent=4,
use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
def test_data_format() -> None:
X = [1.3, 1.1, 0.9, 1.4, 1.5, 3.2]
clf = loop.LocalOutlierProbability(X, n_neighbors=3, use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
def test_missing_values() -> None:
X = np.array([1.3, 1.1, 0.9, 1.4, 1.5, np.nan, 3.2])
clf = loop.LocalOutlierProbability(X, n_neighbors=3, use_numba=NUMBA)
with pytest.raises(SystemExit) as record_a, pytest.warns(
UserWarning) as record_b:
clf.fit()
assert record_a.type == SystemExit
# check that only one warning was raised
assert len(record_b) == 1
# check that the message matches
assert record_b[0].message.args[
0] == "Method does not support missing values in input data."
def test_small_cluster_size(X_n140_outliers) -> None:
# Generate cluster labels
a = [0] * 120
b = [1] * 18
cluster_labels = a + b
clf = loop.LocalOutlierProbability(
X_n140_outliers,
n_neighbors=50,
cluster_labels=cluster_labels,
use_numba=NUMBA
)
with pytest.raises(SystemExit) as record_a, pytest.warns(
UserWarning) as record_b:
clf.fit()
assert record_a.type == SystemExit
# check that only one warning was raised
assert len(record_b) == 1
# check that the message matches
assert record_b[0].message.args[
0] == "Number of neighbors specified larger than smallest " \
"cluster. Specify a number of neighbors smaller than " \
"the smallest cluster size (observations in smallest " \
"cluster minus one)."
def test_stream_fit(X_n140_outliers) -> None:
# Fit the model
X_train = X_n140_outliers[0:138]
X_test = X_n140_outliers[139]
clf = loop.LocalOutlierProbability(X_train, use_numba=NUMBA)
with pytest.warns(UserWarning) as record:
clf.stream(X_test)
# check that the message matches
messages = [i.message.args[0] for i in record]
assert "Must fit on historical data by calling fit() prior to " \
"calling stream(x)." in messages
def test_stream_distance(X_n140_outliers) -> None:
X_train = X_n140_outliers[0:100]
X_test = X_n140_outliers[100:140]
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_train)
d, idx = neigh.kneighbors(X_train, n_neighbors=10, return_distance=True)
# Fit the models in standard and distance matrix form
m = loop.LocalOutlierProbability(X_train, use_numba=NUMBA).fit()
m_dist = loop.LocalOutlierProbability(distance_matrix=d,
neighbor_matrix=idx,
use_numba=NUMBA).fit()
# Collect the scores
X_test_scores = []
for i in range(X_test.shape[0]):
X_test_scores.append(m.stream(np.array(X_test[i])))
X_test_scores = np.array(X_test_scores)
X_test_dist_scores = []
for i in range(X_test.shape[0]):
dd, ii = neigh.kneighbors(np.array([X_test[i]]))
# coding=utf-8
import numpy as np
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData):
def setup_method(self, method):
self.N = N = 50
self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N)})
# FIXME: this file contains 9 errors according to PyCharm
import time
import logging
import numpy as np
import warnings
import pytest
import cma
import datetime
import multiprocessing
from .qubit_object import Qubit
from pycqed.measurement import calibration_toolbox as cal_toolbox
from pycqed.measurement import sweep_functions as swf
from pycqed.measurement import detector_functions as det
from pycqed.measurement.mc_parameter_wrapper import wrap_par_to_swf
import pycqed.measurement.composite_detector_functions as cdf
from pycqed.measurement.optimization import nelder_mead
from pycqed.measurement.openql_experiments import single_qubit_oql as sqo
import pycqed.measurement.openql_experiments.multi_qubit_oql as mqo
from pycqed.measurement.openql_experiments import clifford_rb_oql as cl_oql
from pycqed.measurement.openql_experiments import pygsti_oql
from pycqed.measurement.openql_experiments import openql_helpers as oqh
from pycqed.measurement.openql_experiments.openql_helpers import \
load_range_of_oql_programs, load_range_of_oql_programs_from_filenames
from pycqed.analysis import measurement_analysis as ma
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.analysis.tools import cryoscope_tools as ct
from pycqed.analysis.tools import plotting as plt_tools
from pycqed.analysis_v2 import measurement_analysis as ma2
from pycqed.utilities.general import gen_sweep_pts
from pycqed.utilities.learnerND_minimizer import LearnerND_Minimizer, \
mk_minimization_loss_func, mk_minimization_goal_func
# Imported for a type check
from pycqed.instrument_drivers.physical_instruments.QuTech_AWG_Module \
import QuTech_AWG_Module
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import (
ManualParameter, InstrumentRefParameter)
log = logging.getLogger(__name__)
class CCLight_Transmon(Qubit):
"""
The CCLight_Transmon
Setup configuration:
Drive: CCLight controlling AWG8's and a VSM
Acquisition: UHFQC
Readout pulse configuration: LO modulated using UHFQC AWG
"""
def __init__(self, name, **kw):
t0 = time.time()
super().__init__(name, **kw)
self.add_parameters()
self.connect_message(begin_time=t0)
##########################################################################
# Overrides for class Qubit
##########################################################################
def add_instrument_ref_parameters(self):
self.add_parameter('instr_device',
docstring='Represents sample, contains all qubits '
'and resonators',
parameter_class=InstrumentRefParameter)
# MW sources
self.add_parameter('instr_LO_ro',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_LO_mw',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_spec_source',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_spec_source_2',
parameter_class=InstrumentRefParameter)
# Control electronics
self.add_parameter(
'instr_CC', label='Central Controller',
docstring=('Device responsible for controlling the experiment'
' using eQASM generated using OpenQL, in the near'
' future will be the CC_Light.'),
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_acquisition',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_VSM', label='Vector Switch Matrix',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_MC', label='MeasurementControl',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_nested_MC',
label='Nested MeasurementControl',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_SH', label='SignalHound',
parameter_class=InstrumentRefParameter)
self.add_parameter(
'instr_FluxCtrl', label='Flux control', docstring=(
'Instrument used to control flux can either be an IVVI rack '
'or a meta instrument such as the Flux control.'),
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_VNA',
docstring='Vector Network Analyzer',
parameter_class=InstrumentRefParameter,
initial_value=None)
# LutMan's
self.add_parameter('instr_LutMan_MW',
docstring='Lookuptable manager for '
'microwave control pulses.',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_LutMan_RO',
docstring='Lookuptable manager responsible for '
'microwave readout pulses.',
parameter_class=InstrumentRefParameter)
self.add_parameter('instr_LutMan_Flux',
docstring='Lookuptable manager responsible for '
'flux pulses.',
initial_value=None,
parameter_class=InstrumentRefParameter)
def add_ro_parameters(self):
"""
Adding the parameters relevant for readout.
"""
################################
# RO stimulus/pulse parameters #
################################
self.add_parameter('ro_freq',
label='Readout frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('ro_freq_mod',
label='Readout-modulation frequency', unit='Hz',
initial_value=-20e6,
parameter_class=ManualParameter)
self.add_parameter('ro_pow_LO', label='RO power LO',
unit='dBm', initial_value=20,
parameter_class=ManualParameter)
# RO pulse parameters
self.add_parameter('ro_pulse_type', initial_value='simple',
vals=vals.Enum('gated', 'simple',
'up_down_down', 'up_down_down_final'),
parameter_class=ManualParameter)
# Mixer offsets correction, RO pulse
self.add_parameter('ro_pulse_mixer_offs_I', unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('ro_pulse_mixer_offs_Q', unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('ro_pulse_mixer_alpha', initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_mixer_phi', initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_length',
label='Readout pulse length',
initial_value=100e-9,
unit='s',
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_amp', unit='V',
label='Readout pulse amplitude',
initial_value=0.1,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_amp_CW', unit='V',
label='Readout pulse amplitude',
initial_value=0.1,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_phi', unit='deg', initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_length0', unit='s',
initial_value=1e-9,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_amp0', unit='V', initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_phi0', unit='deg', initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_length1', unit='s',
initial_value=1e-9,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_amp1', unit='V', initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_pulse_down_phi1', unit='deg', initial_value=0,
parameter_class=ManualParameter)
#############################
# RO acquisition parameters #
#############################
ro_acq_docstr = (
'Determines what type of integration weights to use: '
'\n\t SSB: Single sideband demodulation\n\t'
'DSB: Double sideband demodulation\n\t'
'optimal: waveforms specified in "RO_acq_weight_func_I" '
'\n\tand "RO_acq_weight_func_Q"')
self.add_parameter('ro_acq_weight_type',
initial_value='SSB',
vals=vals.Enum(
'SSB', 'DSB', 'optimal', 'optimal IQ'),
docstring=ro_acq_docstr,
parameter_class=ManualParameter)
self.add_parameter(
'ro_acq_weight_chI', initial_value=0, docstring=(
'Determines the I-channel for integration. When the'
' ro_acq_weight_type is optimal only this channel will '
'affect the result.'), vals=vals.Ints(0, 9),
parameter_class=ManualParameter)
self.add_parameter(
'ro_acq_weight_chQ', initial_value=1, docstring=(
'Determines the Q-channel for integration.'),
vals=vals.Ints(0, 9), parameter_class=ManualParameter)
self.add_parameter('ro_acq_weight_func_I',
vals=vals.Arrays(),
label='Optimized weights for I channel',
parameter_class=ManualParameter)
self.add_parameter('ro_acq_weight_func_Q',
vals=vals.Arrays(),
label='Optimized weights for Q channel',
parameter_class=ManualParameter)
# FIXME!: Dirty hack because of qusurf issue #63, added 2 hardcoded
# delay samples in the optimized weights
self.add_parameter('ro_acq_weight_func_delay_samples_hack',
vals=vals.Ints(),
initial_value=0,
label='weight function delay samples',
parameter_class=ManualParameter)
self.add_parameter(
'ro_acq_delay', unit='s',
label='Readout acquisition delay',
vals=vals.Numbers(min_value=0),
initial_value=0,
parameter_class=ManualParameter,
docstring=('The time between the instruction that triggers the'
' readout pulse and the instruction that triggers the '
'acquisition. A positive number means that the '
'acquisition starts after the pulse is sent.'))
self.add_parameter(
'ro_pulse_delay', unit='s',
label='Readout pulse delay',
vals=vals.Numbers(0, 1e-6),
initial_value=0,
parameter_class=ManualParameter,
docstring=('The delay time for the readout pulse'))
self.add_parameter(
'ro_acq_mixer_phi', unit='degree',
label='Readout mixer phi',
vals=vals.Numbers(),
initial_value=0,
parameter_class=ManualParameter,
docstring=('acquisition mixer phi, used for mixer deskewing in '
'real time'))
self.add_parameter(
'ro_acq_mixer_alpha', unit='',
label='Readout mixer alpha',
vals=vals.Numbers(min_value=0.8),
initial_value=1,
parameter_class=ManualParameter,
docstring=('acquisition mixer alpha, used for mixer deskewing in '
'real time'))
self.add_parameter(
'ro_acq_input_average_length', unit='s',
label='Readout input averaging length',
vals=vals.Numbers(min_value=0, max_value=4096/1.8e9),
initial_value=4096/1.8e9,
parameter_class=ManualParameter,
docstring=('The measurement time in input averaging.'))
self.add_parameter('ro_acq_integration_length', initial_value=500e-9,
vals=vals.Numbers(
min_value=0, max_value=4096/1.8e9),
parameter_class=ManualParameter)
self.add_parameter('ro_acq_averages', initial_value=1024,
vals=vals.Numbers(min_value=0, max_value=1e6),
parameter_class=ManualParameter)
self.add_parameter('ro_soft_avg', initial_value=1,
docstring=('Number of soft averages to be '
'performed using the MC.'),
vals=vals.Ints(min_value=1),
parameter_class=ManualParameter)
# self.add_parameter('ro_power_cw', label='RO power cw',
# unit='dBm',
# parameter_class=ManualParameter)
# Single shot readout specific parameters
self.add_parameter('ro_acq_digitized', vals=vals.Bool(),
initial_value=False,
parameter_class=ManualParameter)
self.add_parameter('ro_acq_threshold', unit='dac-value',
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_acq_rotated_SSB_when_optimal', vals=vals.Bool(),
docstring=(
'bypasses optimal weights, and uses rotated SSB instead'),
initial_value=False,
parameter_class=ManualParameter)
self.add_parameter('ro_acq_rotated_SSB_rotation_angle', vals=vals.Numbers(
min_value=-np.pi, max_value=np.pi),
docstring=(
'uses this as the rotation angle for rotated SSB'),
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('ro_acq_integration_length_weigth_function', vals=vals.Numbers(
min_value=0, max_value=4096/1.8e9),
docstring=(
'sets weight function elements to 0 beyond this time'),
initial_value=4096/1.8e9,
parameter_class=ManualParameter)
# self.add_parameter('cal_pt_zero',
# initial_value=None,
# vals=vals.Anything(), # should be a tuple validator
# label='Calibration point |0>',
# parameter_class=ManualParameter)
# self.add_parameter('cal_pt_one',
# initial_value=None,
# vals=vals.Anything(), # should be a tuple validator
# label='Calibration point |1>',
# parameter_class=ManualParameter)
def add_mw_parameters(self):
# Mixer skewness correction
self.add_parameter('mw_G_mixer_phi', unit='deg',
label='Mixer skewness phi Gaussian quadrature',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_G_mixer_alpha', unit='',
label='Mixer skewness alpha Gaussian quadrature',
parameter_class=ManualParameter, initial_value=1)
self.add_parameter('mw_D_mixer_phi', unit='deg',
label='Mixer skewness phi Derivative quadrature',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_D_mixer_alpha', unit='',
label='Mixer skewness alpha Derivative quadrature',
parameter_class=ManualParameter, initial_value=1)
# Mixer offsets correction, qubit drive
self.add_parameter('mw_mixer_offs_GI',
unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_mixer_offs_GQ', unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_mixer_offs_DI',
unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_mixer_offs_DQ', unit='V',
parameter_class=ManualParameter, initial_value=0)
self.add_parameter('mw_pow_td_source',
label='Time-domain power',
unit='dBm',
initial_value=20,
parameter_class=ManualParameter)
self.add_parameter('mw_freq_mod',
initial_value=-100e6,
label='pulse-modulation frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('mw_amp180',
label='Pi-pulse amplitude', unit='V',
initial_value=.8,
parameter_class=ManualParameter)
self.add_parameter('mw_amp90_scale',
label='pulse amplitude scaling factor',
unit='',
initial_value=.5,
vals=vals.Numbers(min_value=0, max_value=1.0),
parameter_class=ManualParameter)
self.add_parameter('mw_channel_amp',
label='AWG channel amplitude. WARNING: Check your hardware specific limits!',
unit='',
initial_value=.5,
vals=vals.Numbers(min_value=0, max_value=1.6),
parameter_class=ManualParameter)
self.add_parameter('mw_channel_range',
label='AWG channel range. WARNING: Check your hardware specific limits!',
unit='V',
initial_value=.8,
vals=vals.Enum(0.2, 0.4, 0.6, 0.8, 1, 2, 3, 4, 5),
parameter_class=ManualParameter)
self.add_parameter('mw_ef_amp',
label='Pi-pulse amplitude ef-transition', unit='V',
initial_value=.4,
parameter_class=ManualParameter)
self.add_parameter('mw_awg_ch', parameter_class=ManualParameter,
initial_value=1,
vals=vals.Ints())
self.add_parameter('mw_gauss_width', unit='s',
initial_value=10e-9,
parameter_class=ManualParameter)
self.add_parameter('mw_motzoi', label='Motzoi parameter', unit='',
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_marker_source',
label='VSM switch state',
initial_value='int',
vals=vals.Enum('ext', 'int'),
parameter_class=ManualParameter)
self._mw_vsm_delay = 0
self.add_parameter(
'mw_vsm_delay', label='CCL VSM trigger delay',
vals=vals.Ints(0, 127), unit='samples',
docstring=('This value needs to be calibrated to ensure that '
'the VSM mask aligns with the microwave pulses. '
'Calibration is done using'
' self.calibrate_mw_vsm_delay.'),
set_cmd=self._set_mw_vsm_delay,
get_cmd=self._get_mw_vsm_delay)
self._mw_fine_delay = 0
self.add_parameter('mw_fine_delay', label='fine delay of the AWG channel',
unit='s',
docstring='This parameter serves for fine tuning of '
'the RO, MW and flux pulses. It should be kept '
'positive and below 20e-9. Any larger adjustments '
'should be done by changing the CCL dio delay '
'through the device object.',
set_cmd=self._set_mw_fine_delay,
get_cmd=self._get_mw_fine_delay)
self._flux_fine_delay = 0
self.add_parameter('flux_fine_delay', label='fine delay of the AWG channel',
unit='s',
docstring='This parameter serves for fine tuning of '
'the RO, MW and flux pulses. It should be kept '
'positive and below 20e-9. Any larger adjustments '
'should be done by changing the CCL dio delay '
'through the device object.',
set_cmd=self._set_flux_fine_delay,
get_cmd=self._get_flux_fine_delay)
self.add_parameter('mw_vsm_ch_in',
label='VSM input channel Gaussian component',
vals=vals.Ints(1, 4),
initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_mod_out',
label='VSM output module for microwave pulses',
docstring=('Selects the VSM output module for MW'
' pulses. N.B. for spec the '
'spec_vsm_mod_out parameter is used.'),
vals=vals.Ints(1, 8),
initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_G_amp',
label='VSM amp Gaussian component',
vals=vals.Numbers(0.1, 1.0),
initial_value=1.0,
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_D_amp',
label='VSM amp Derivative component',
vals=vals.Numbers(0.1, 1.0),
initial_value=1.0,
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_G_phase',
vals=vals.Numbers(-125, 45),
initial_value=0, unit='deg',
parameter_class=ManualParameter)
self.add_parameter('mw_vsm_D_phase',
vals=vals.Numbers(-125, 45),
initial_value=0, unit='deg',
parameter_class=ManualParameter)
def add_spec_parameters(self):
self.add_parameter('spec_vsm_amp',
label='VSM amplitude for spec pulses',
vals=vals.Numbers(0.1, 1.0),
initial_value=1.0,
parameter_class=ManualParameter)
self.add_parameter('spec_vsm_mod_out',
label='VSM output module for spectroscopy pulses',
docstring=('Selects the VSM output module for spec'
' pulses. N.B. for mw pulses the '
'mw_vsm_mod_out parameter is used.'),
vals=vals.Ints(1, 8),
initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('spec_vsm_ch_in',
label='VSM input channel for spec pulses',
docstring=('VSM input channel for spec pulses;'
' generally this should be the same as'
' the mw_vsm_ch_in parameter.'),
vals=vals.Ints(1, 4),
initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('spec_pulse_length',
label='Pulsed spec pulse duration',
unit='s', vals=vals.Numbers(0e-9, 20e-6),
# FIXME validator: should be multiple of 20e-9
initial_value=500e-9,
parameter_class=ManualParameter)
self.add_parameter(
'spec_type', parameter_class=ManualParameter, docstring=(
'determines what kind of spectroscopy to do, \n'
'"CW": opens the relevant VSM channel to always let the tone '
'through. \n'
'"vsm_gated": uses the VSM in external mode to gate the spec '
'source. \n '
'"IQ" uses the TD source and AWG8 to generate a spec pulse'),
initial_value='CW',
vals=vals.Enum('CW', 'IQ', 'vsm_gated'))
self.add_parameter(
'spec_amp', unit='V', docstring=(
'Amplitude of the spectroscopy pulse in the mw LutMan. '
'The power of the spec pulse should be controlled through '
'the vsm amplitude "spec_vsm_amp"'),
vals=vals.Numbers(0, 1), parameter_class=ManualParameter,
initial_value=0.8)
self.add_parameter(
'spec_pow', unit='dB',
vals=vals.Numbers(-70, 20),
parameter_class=ManualParameter,
initial_value=-30)
self.add_parameter(
'spec_wait_time', unit='s',
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
initial_value=0)
def add_flux_parameters(self):
# fl_dc_ is the prefix for DC flux bias related params
# FIXME:
self.add_parameter(
'fl_dc_polycoeff',
docstring='Polynomial coefficients for current to frequency conversion',
vals=vals.Arrays(),
# initial value is chosen to not raise errors
initial_value=np.array([0, 0, -1e12, 0, 6e9]),
parameter_class=ManualParameter)
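# Usage sketch (assumption: coefficients are ordered highest power first,
# as returned by np.polyfit): the qubit frequency at a bias current I can
# then be estimated as np.polyval(self.fl_dc_polycoeff(), I); the default
# value evaluates to 6 GHz at zero bias. calc_current_to_freq (used further
# below) presumably performs this evaluation.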
self.add_parameter(
'fl_ac_polycoeff',
docstring='Polynomial coefficients for current to frequency conversion',
vals=vals.Arrays(),
# initial value is chosen to not raise errors
initial_value=np.array([0, 0, -1e12, 0, 6e9]),
parameter_class=ManualParameter)
self.add_parameter(
'fl_dc_I_per_phi0', label='Flux bias I/Phi0',
docstring='Conversion factor for flux bias, current per flux quantum',
vals=vals.Numbers(), unit='A', initial_value=10e-3,
parameter_class=ManualParameter)
self.add_parameter(
'fl_dc_I', label='Flux bias', unit='A',
docstring='Current flux bias setting', vals=vals.Numbers(),
initial_value=0, parameter_class=ManualParameter)
self.add_parameter(
'fl_dc_I0', unit='A', label='Flux bias sweet spot', docstring=(
'Flux bias offset corresponding to the sweetspot'),
vals=vals.Numbers(), initial_value=0,
parameter_class=ManualParameter)
# FIXME: ? not used anywhere
self.add_parameter(
'fl_dc_ch', label='Flux bias channel',
docstring=('Used to determine the DAC channel used for DC '
'flux biasing. Should be an int when using an IVVI rack '
'or a str (channel name) when using an SPI rack.'),
vals=vals.Strings(), initial_value=None,
parameter_class=ManualParameter)
# Currently this has only the parameters for 1 CZ gate.
# in the future there will be 5 distinct flux operations for which
# parameters have to be stored.
# cz to all nearest neighbours (of which 2 are only phase corr) and
# the "park" operation.
self.add_parameter('fl_cz_length', vals=vals.Numbers(),
unit='s', initial_value=35e-9,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_lambda_2', vals=vals.Numbers(),
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_lambda_3', vals=vals.Numbers(),
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_theta_f', vals=vals.Numbers(),
unit='deg',
initial_value=80,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_V_per_phi0', vals=vals.Numbers(),
unit='V', initial_value=1,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_freq_01_max', vals=vals.Numbers(),
unit='Hz', parameter_class=ManualParameter)
self.add_parameter('fl_cz_J2', vals=vals.Numbers(),
unit='Hz',
initial_value=50e6,
parameter_class=ManualParameter)
self.add_parameter('fl_cz_freq_interaction', vals=vals.Numbers(),
unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('fl_cz_phase_corr_length',
unit='s',
initial_value=5e-9, vals=vals.Numbers(),
parameter_class=ManualParameter)
self.add_parameter('fl_cz_phase_corr_amp',
unit='V',
initial_value=0, vals=vals.Numbers(),
parameter_class=ManualParameter)
def add_config_parameters(self):
self.add_parameter(
'cfg_trigger_period', label='Trigger period',
docstring=('Time between experiments, used to initialize all'
' qubits in the ground state'),
unit='s', initial_value=200e-6,
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=1e-6, max_value=327668e-9))
self.add_parameter('cfg_openql_platform_fn',
label='OpenQL platform configuration filename',
parameter_class=ManualParameter,
vals=vals.Strings())
self.add_parameter(
'cfg_qubit_nr', label='Qubit number', vals=vals.Ints(0, 20),
parameter_class=ManualParameter, initial_value=0,
docstring='The qubit number is used in the OpenQL compiler. ')
self.add_parameter('cfg_qubit_freq_calc_method',
initial_value='latest',
parameter_class=ManualParameter,
vals=vals.Enum('latest', 'flux'))
self.add_parameter('cfg_rb_calibrate_method',
initial_value='restless',
parameter_class=ManualParameter,
vals=vals.Enum('restless', 'ORBIT'))
self.add_parameter('cfg_cycle_time',
initial_value=20e-9,
unit='s',
parameter_class=ManualParameter,
# this is to effectively hardcode the cycle time
vals=vals.Enum(20e-9))
# TODO: add docstring (Oct 2017)
self.add_parameter('cfg_prepare_ro_awg', vals=vals.Bool(),
docstring=('If False disables uploading pulses '
'to UHFQC'),
initial_value=True,
parameter_class=ManualParameter)
self.add_parameter('cfg_prepare_mw_awg', vals=vals.Bool(),
docstring=('If False disables uploading pulses '
'to AWG8'),
initial_value=True,
parameter_class=ManualParameter)
self.add_parameter('cfg_with_vsm', vals=vals.Bool(),
docstring=('If set to False, bypasses all commands to the VSM, '
'to avoid using the VSM.'),
initial_value=True,
parameter_class=ManualParameter)
self.add_parameter('cfg_spec_mode', vals=vals.Bool(),
docstring=(
'Used to activate spec mode in measurements'),
initial_value=False,
parameter_class=ManualParameter)
def add_generic_qubit_parameters(self):
self.add_parameter('E_c', unit='Hz',
initial_value=300e6,
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('E_j', unit='Hz',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('T1', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers(0, 200e-6))
self.add_parameter('T2_echo', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers(0, 200e-6))
self.add_parameter('T2_star', unit='s',
parameter_class=ManualParameter,
vals=vals.Numbers(0, 200e-6))
self.add_parameter('freq_qubit',
label='Qubit frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('freq_max',
label='qubit sweet spot frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('freq_res',
label='Resonator frequency', unit='Hz',
parameter_class=ManualParameter)
self.add_parameter('asymmetry', unit='',
docstring='Asymmetry parameter of the SQUID loop',
initial_value=0,
parameter_class=ManualParameter)
self.add_parameter('anharmonicity', unit='Hz',
label='Anharmonicity',
docstring='Anharmonicity, negative by convention',
parameter_class=ManualParameter,
# typical target value
initial_value=-300e6,
vals=vals.Numbers())
self.add_parameter('dispersive_shift',
label='Resonator dispersive shift', unit='Hz',
parameter_class=ManualParameter,
vals=vals.Numbers())
self.add_parameter('F_ssro',
initial_value=0,
label='Single shot readout assignment fidelity',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('F_discr',
initial_value=0,
label='Single shot readout discrimination fidelity',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('ro_rel_events',
initial_value=0,
label='relaxation errors from ssro fit',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('ro_res_ext',
initial_value=0,
label='residual extinction errors from ssro fit',
vals=vals.Numbers(0.0, 1.0),
parameter_class=ManualParameter)
self.add_parameter('F_RB',
initial_value=0,
label='RB single qubit Clifford fidelity',
vals=vals.Numbers(0, 1.0),
parameter_class=ManualParameter)
# Parameter helpers
def _using_QWG(self):
"""
Checks if a QWG is used for microwave control.
"""
AWG = self.instr_LutMan_MW.get_instr().AWG.get_instr()
return isinstance(AWG, QuTech_AWG_Module)
def _set_mw_vsm_delay(self, val):
# sort of a pseudo Manual Parameter
self.instr_CC.get_instr().set(
'vsm_channel_delay{}'.format(self.cfg_qubit_nr()), val)
self._mw_vsm_delay = val
def _get_mw_vsm_delay(self):
return self._mw_vsm_delay
def _set_mw_fine_delay(self, val):
if self.cfg_with_vsm():
logging.warning('CCL transmon is using VSM. Use mw_vsm_delay to '
'adjust delay')
else:
lutman = self.find_instrument(self.instr_LutMan_MW())
AWG = lutman.find_instrument(lutman.AWG())
if self._using_QWG():
logging.warning(
'CCL transmon is using QWG. mw_fine_delay not supported.')
else:
AWG.set('sigouts_{}_delay'.format(lutman.channel_I()-1), val)
AWG.set('sigouts_{}_delay'.format(lutman.channel_Q()-1), val)
self._mw_fine_delay = val
def _get_mw_fine_delay(self):
return self._mw_fine_delay
def _set_flux_fine_delay(self, val):
if self.instr_LutMan_Flux() is not None:
lutman = self.find_instrument(self.instr_LutMan_Flux())
AWG = lutman.find_instrument(lutman.AWG())
if self._using_QWG():
logging.warning('CCL transmon is using QWG. Not implemented.')
else:
AWG.set('sigouts_{}_delay'.format(
lutman.cfg_awg_channel()-1), val)
# val = AWG.get('sigouts_{}_delay'.format(lutman.cfg_awg_channel()-1))
else:
logging.warning(
'No Flux LutMan specified, could not set flux timing fine')
self._flux_fine_delay = val
def _get_flux_fine_delay(self):
return self._flux_fine_delay
##########################################################################
# Prepare functions
##########################################################################
def prepare_for_continuous_wave(self):
if 'optimal' in self.ro_acq_weight_type():
warnings.warn('Changing ro_acq_weight_type to SSB.')
self.ro_acq_weight_type('SSB')
if self.ro_acq_weight_type() not in {'DSB', 'SSB'}:
# this is because the CW acquisition detects using angle and phase
# and this requires two channels to rotate the signal properly.
raise ValueError('Readout "{}" '.format(self.ro_acq_weight_type())
+ 'weight type must be "SSB" or "DSB"')
if self.cfg_with_vsm():
self._prep_cw_configure_VSM()
self.prepare_readout(CW=True)
self._prep_cw_spec()
# source is turned on in measure spec when needed
self.instr_LO_mw.get_instr().off()
if self.instr_spec_source() is not None:
self.instr_spec_source.get_instr().off()
if self.instr_spec_source_2() is not None:
self.instr_spec_source_2.get_instr().off()
def prepare_readout(self, CW=False):
"""
Configures the readout. Consists of the following steps
- instantiate the relevant detector functions
- set the microwave frequencies and sources
- generate the RO pulse
- set the integration weights
"""
if self.cfg_prepare_ro_awg():
self.instr_acquisition.get_instr().load_default_settings(
upload_sequence=False)
self._prep_ro_pulse(CW=CW)
self._prep_ro_integration_weights()
self._prep_deskewing_matrix()
else:
warnings.warn(
'"cfg_prepare_ro_awg" set to False, not preparing readout .')
self._prep_ro_instantiate_detectors()
self._prep_ro_sources()
def prepare_for_timedomain(self):
self.prepare_readout()
self._prep_td_sources()
self._prep_mw_pulses()
if self.cfg_with_vsm():
self._prep_td_configure_VSM()
def prepare_for_fluxing(self, reset=True):
pass
def prepare_characterizing(self, exceptions: list = [], verbose=True):
"""
Prepares the qubit for (automatic) characterisation. Will park all
other qubits in the device object to their 'anti-sweetspot' (which is a
sweetspot as well technically speaking). Afterwards, it will move
the qubit to be characterized (self) to its sweetspot.
Will ignore any qubit whose name (string) is in 'exceptions'
"""
fluxcurrent = self.instr_FluxCtrl.get_instr()
device = self.instr_device.get_instr()
exceptions.append('fakequbit')
Qs = device.qubits()
for Q in Qs:
if device.find_instrument(Q).fl_dc_I_per_phi0() == 1:
exceptions.append(Q)
# exceptions.append('D2')
# First park all other qubits to anti sweetspot
print('Moving other qubits away ...')
for qubit_name in device.qubits():
if (qubit_name not in exceptions) and (qubit_name != self.name):
qubit = device.find_instrument(qubit_name)
channel = qubit.fl_dc_ch()
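# Park the qubit half a flux quantum away from its sweet spot, i.e. at
# its frequency minimum ('anti-sweetspot'), to move it out of the way.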
current = qubit.fl_dc_I0() + qubit.fl_dc_I_per_phi0()/2
fluxcurrent[channel](current)
if verbose:
print('\t Moving {} to {:.3f} mA'
.format(qubit_name, current/1e-3))
# Move self to sweetspot:
if verbose:
print('Moving {} to {:.3f} mA'.format(
self.name, self.fl_dc_I0()/1e-3))
fluxcurrent[self.fl_dc_ch()](self.fl_dc_I0())
return True
def get_int_avg_det(self, **kw):
"""
Instantiates an integration average detector using parameters from
the qubit object. **kw get passed on to the class when instantiating
the detector function.
"""
if self.ro_acq_weight_type() == 'optimal':
ro_channels = [self.ro_acq_weight_chI()]
if self.ro_acq_digitized():
result_logging_mode = 'digitized'
else:
result_logging_mode = 'lin_trans'
else:
ro_channels = [self.ro_acq_weight_chI(),
self.ro_acq_weight_chQ()]
result_logging_mode = 'raw'
int_avg_det = det.UHFQC_integrated_average_detector(
UHFQC=self.instr_acquisition.get_instr(),
AWG=self.instr_CC.get_instr(),
channels=ro_channels,
result_logging_mode=result_logging_mode,
nr_averages=self.ro_acq_averages(),
integration_length=self.ro_acq_integration_length(), **kw)
return int_avg_det
##########################################################################
# Prepare functions: private
##########################################################################
def _prep_cw_spec(self):
# FIXME: this code block has no effect
if self.cfg_with_vsm():
VSM = self.instr_VSM.get_instr()
if self.spec_type() == 'CW':
marker_source = 'int'
else:
marker_source = 'ext'
if self.instr_spec_source() is not None:
self.instr_spec_source.get_instr().power(self.spec_pow())
# FIXME: UHFQC specific
def _prep_deskewing_matrix(self):
UHFQC = self.instr_acquisition.get_instr()
alpha = self.ro_acq_mixer_alpha()
phi = self.ro_acq_mixer_phi()
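# The deskewing matrix below corrects the acquisition mixer's amplitude
# (alpha) and phase (phi, in degrees) imbalance in real time:
#   M = [[1, -alpha*sin(phi)],
#        [0,  alpha*cos(phi)]]
# For an ideal mixer (alpha=1, phi=0) this is the identity matrix.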
predistortion_matrix = np.array(
((1, -alpha * np.sin(phi * 2 * np.pi / 360)),
(0, alpha * np.cos(phi * 2 * np.pi / 360))))
UHFQC.qas_0_deskew_rows_0_cols_0(predistortion_matrix[0, 0])
UHFQC.qas_0_deskew_rows_0_cols_1(predistortion_matrix[0, 1])
UHFQC.qas_0_deskew_rows_1_cols_0(predistortion_matrix[1, 0])
UHFQC.qas_0_deskew_rows_1_cols_1(predistortion_matrix[1, 1])
return predistortion_matrix
def _prep_ro_instantiate_detectors(self):
self.instr_MC.get_instr().soft_avg(self.ro_soft_avg())
if 'optimal' in self.ro_acq_weight_type():
if self.ro_acq_weight_type() == 'optimal':
ro_channels = [self.ro_acq_weight_chI()]
elif self.ro_acq_weight_type() == 'optimal IQ':
ro_channels = [
self.ro_acq_weight_chI(), self.ro_acq_weight_chQ()]
result_logging_mode = 'lin_trans'
if self.ro_acq_digitized():
result_logging_mode = 'digitized'
# Update the RO threshold
acq_ch = self.ro_acq_weight_chI()
# The threshold that is set in the hardware needs to be
# corrected for the offset as this is only applied in
# software.
if abs(self.ro_acq_threshold()) > 32:
threshold = 32
warnings.warn('Clipping {}.ro_acq_threshold {}>32'.format(
self.name, self.ro_acq_threshold()))
# working around the limitation of threshold in UHFQC
# which cannot be >abs(32).
else:
threshold = self.ro_acq_threshold()
self.instr_acquisition.get_instr().set(
'qas_0_thresholds_{}_level'.format(acq_ch), threshold)
else:
ro_channels = [self.ro_acq_weight_chI(),
self.ro_acq_weight_chQ()]
result_logging_mode = 'raw'
if 'UHFQC' in self.instr_acquisition():
UHFQC = self.instr_acquisition.get_instr()
self.input_average_detector = det.UHFQC_input_average_detector(
UHFQC=UHFQC,
AWG=self.instr_CC.get_instr(),
nr_averages=self.ro_acq_averages(),
nr_samples=int(self.ro_acq_input_average_length()*1.8e9))
self.int_avg_det = self.get_int_avg_det()
self.int_avg_det_single = det.UHFQC_integrated_average_detector(
UHFQC=UHFQC, AWG=self.instr_CC.get_instr(),
channels=ro_channels,
result_logging_mode=result_logging_mode,
nr_averages=self.ro_acq_averages(),
real_imag=True, single_int_avg=True,
integration_length=self.ro_acq_integration_length())
self.UHFQC_spec_det = det.UHFQC_spectroscopy_detector(
UHFQC=UHFQC, ro_freq_mod=self.ro_freq_mod(),
AWG=self.instr_CC.get_instr(), channels=ro_channels,
nr_averages=self.ro_acq_averages(),
integration_length=self.ro_acq_integration_length())
self.int_log_det = det.UHFQC_integration_logging_det(
UHFQC=UHFQC, AWG=self.instr_CC.get_instr(),
channels=ro_channels,
result_logging_mode=result_logging_mode,
integration_length=self.ro_acq_integration_length())
else:
raise NotImplementedError()
# def _prep_ro_sources(self):
# LO = self.instr_LO_ro.get_instr()
# LO.frequency.set(self.ro_freq() - self.ro_freq_mod())
# LO.on()
# LO.power(self.ro_pow_LO())
def _prep_ro_sources(self):
if self.instr_LutMan_RO.get_instr().LO_freq() is not None:
log.info('Warning: This qubit is using a fixed RO LO frequency.')
LO = self.instr_LO_ro.get_instr()
Lo_Lutman = self.instr_LutMan_RO.get_instr()
LO_freq = Lo_Lutman.LO_freq()
LO.frequency.set(LO_freq)
mod_freq = self.ro_freq() - LO_freq
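# With a fixed LO frequency, choose the modulation frequency such that
# LO_freq + ro_freq_mod equals the desired readout frequency ro_freq.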
self.ro_freq_mod(mod_freq)
log.info("Setting modulation freq of {} to {}".format(self.name, mod_freq))
else:
LO = self.instr_LO_ro.get_instr()
LO.frequency.set(self.ro_freq() - self.ro_freq_mod())
LO.on()
LO.power(self.ro_pow_LO())
# def _prep_ro_sources(self, qubits):
# """
# turn on and configure the RO LO's of all qubits to be measured.
# """
# for qb_name in qubits:
# LO = self.find_instrument(qb_name).instr_LO_ro.get_instr()
# LO.frequency.set(self.ro_lo_freq())
# LO.power(self.ro_pow_LO())
# LO.on()
def _prep_ro_pulse(self, upload=True, CW=False):
"""
Sets the appropriate parameters in the RO LutMan and uploads the
desired wave.
Relevant parameters are:
ro_pulse_type ("up_down_down", "square")
ro_freq_mod
ro_acq_delay
ro_pulse_length
ro_pulse_amp
ro_pulse_phi
ro_pulse_down_length0
ro_pulse_down_amp0
ro_pulse_down_phi0
ro_pulse_down_length1
ro_pulse_down_amp1
ro_pulse_down_phi1
ro_pulse_mixer_alpha
ro_pulse_mixer_phi
ro_pulse_mixer_offs_I
ro_pulse_mixer_offs_Q
"""
if CW:
ro_amp = self.ro_pulse_amp_CW()
else:
ro_amp = self.ro_pulse_amp()
if 'UHFQC' not in self.instr_acquisition():
raise NotImplementedError()
UHFQC = self.instr_acquisition.get_instr()
if 'gated' in self.ro_pulse_type().lower():
UHFQC.awg_sequence_acquisition()
else:
ro_lm = self.instr_LutMan_RO.get_instr()
ro_lm.AWG(self.instr_acquisition())
idx = self.cfg_qubit_nr()
# These parameters affect all resonators
ro_lm.set('resonator_combinations', [[idx]])
ro_lm.set('pulse_type', 'M_' + self.ro_pulse_type())
ro_lm.set('mixer_alpha',
self.ro_pulse_mixer_alpha())
ro_lm.set('mixer_phi',
self.ro_pulse_mixer_phi())
ro_lm.set('M_modulation_R{}'.format(idx), self.ro_freq_mod())
ro_lm.set('M_length_R{}'.format(idx),
self.ro_pulse_length())
ro_lm.set('M_amp_R{}'.format(idx),
ro_amp)
ro_lm.set('M_delay_R{}'.format(idx),
self.ro_pulse_delay())
ro_lm.set('M_phi_R{}'.format(idx),
self.ro_pulse_phi())
ro_lm.set('M_down_length0_R{}'.format(idx),
self.ro_pulse_down_length0())
ro_lm.set('M_down_amp0_R{}'.format(idx),
self.ro_pulse_down_amp0())
ro_lm.set('M_down_phi0_R{}'.format(idx),
self.ro_pulse_down_phi0())
ro_lm.set('M_down_length1_R{}'.format(idx),
self.ro_pulse_down_length1())
ro_lm.set('M_down_amp1_R{}'.format(idx),
self.ro_pulse_down_amp1())
ro_lm.set('M_down_phi1_R{}'.format(idx),
self.ro_pulse_down_phi1())
ro_lm.acquisition_delay(self.ro_acq_delay())
if upload:
ro_lm.load_DIO_triggered_sequence_onto_UHFQC()
UHFQC.sigouts_0_offset(self.ro_pulse_mixer_offs_I())
UHFQC.sigouts_1_offset(self.ro_pulse_mixer_offs_Q())
if [self.cfg_qubit_nr()] not in ro_lm.resonator_combinations():
warnings.warn('Qubit number of {} is not '.format(self.name) +
'present in resonator_combinations of the readout lutman.')
def _prep_ro_integration_weights(self):
"""
Sets the ro acquisition integration weights.
The relevant parameters here are
ro_acq_weight_type -> 'SSB', 'DSB' or 'Optimal'
ro_acq_weight_chI -> Specifies which integration weight
(channel) to use
ro_acq_weight_chQ -> The second channel in case of SSB/DSB
RO_acq_weight_func_I -> A custom integration weight (array)
RO_acq_weight_func_Q -> ""
"""
if 'UHFQC' in self.instr_acquisition():
UHFQC = self.instr_acquisition.get_instr()
if self.ro_acq_weight_type() == 'SSB':
UHFQC.prepare_SSB_weight_and_rotation(
IF=self.ro_freq_mod(),
weight_function_I=self.ro_acq_weight_chI(),
weight_function_Q=self.ro_acq_weight_chQ())
elif self.ro_acq_weight_type() == 'DSB':
UHFQC.prepare_DSB_weight_and_rotation(
IF=self.ro_freq_mod(),
weight_function_I=self.ro_acq_weight_chI(),
weight_function_Q=self.ro_acq_weight_chQ())
elif 'optimal' in self.ro_acq_weight_type():
if (self.ro_acq_weight_func_I() is None or
self.ro_acq_weight_func_Q() is None):
logging.warning('Optimal weights are None,' +
' not setting integration weights')
elif self.ro_acq_rotated_SSB_when_optimal():
# this allows bypassing the optimal weights for poor SNR qubits
# working around the limitation of threshold in UHFQC
# which cannot be >abs(32)
if self.ro_acq_digitized() and abs(self.ro_acq_threshold()) > 32:
scaling_factor = 32/self.ro_acq_threshold()
else:
scaling_factor = 1
UHFQC.prepare_SSB_weight_and_rotation(
IF=self.ro_freq_mod(),
weight_function_I=self.ro_acq_weight_chI(),
weight_function_Q=None,
rotation_angle=self.ro_acq_rotated_SSB_rotation_angle(),
length=self.ro_acq_integration_length_weigth_function(),
scaling_factor=scaling_factor)
else:
# When optimal weights are used, only the RO I weight
# channel is used
# FIXME!: Dirty hack because of qusurf issue #63, adds
# delay samples in the optimized weights
opt_WI = self.ro_acq_weight_func_I()
opt_WQ = self.ro_acq_weight_func_Q()
del_sampl = self.ro_acq_weight_func_delay_samples_hack()
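# Shift the optimal weights by del_sampl samples: a positive value drops
# samples from the front and pads zeros at the end, a negative value
# prepends zeros and truncates the tail (see the branches below).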
if del_sampl > 0:
zeros = np.zeros(abs(del_sampl))
opt_WI = np.concatenate(
[opt_WI[abs(del_sampl):], zeros])
opt_WQ = np.concatenate(
[opt_WQ[abs(del_sampl):], zeros])
elif del_sampl < 0:
zeros = np.zeros(abs(del_sampl))
opt_WI = np.concatenate(
[zeros, opt_WI[:-abs(del_sampl)]])
opt_WQ = np.concatenate(
[zeros, opt_WQ[:-abs(del_sampl)]])
else:
pass
UHFQC.set('qas_0_integration_weights_{}_real'.format(
self.ro_acq_weight_chI()), opt_WI)
UHFQC.set('qas_0_integration_weights_{}_imag'.format(
self.ro_acq_weight_chI()), opt_WQ)
UHFQC.set('qas_0_rotations_{}'.format(
self.ro_acq_weight_chI()), 1.0 - 1.0j)
if self.ro_acq_weight_type() == 'optimal IQ':
print('setting the optimal Q')
UHFQC.set('qas_0_integration_weights_{}_real'.format(
self.ro_acq_weight_chQ()), opt_WQ)
UHFQC.set('qas_0_integration_weights_{}_imag'.format(
self.ro_acq_weight_chQ()), opt_WI)
UHFQC.set('qas_0_rotations_{}'.format(
self.ro_acq_weight_chQ()), 1.0 + 1.0j)
else:
raise NotImplementedError(
'CBox, DDM or other are currently not supported')
def _prep_td_sources(self):
# if self.instr_spec_source() is not None:
# self.instr_spec_source.get_instr().off()
# self.instr_LO_mw.get_instr().on()
# self.instr_LO_mw.get_instr().pulsemod_state(False)
# # Set source to fs =f-f_mod such that pulses appear at f = fs+f_mod
# self.instr_LO_mw.get_instr().frequency.set(
# self.freq_qubit.get() - self.mw_freq_mod.get())
# self.instr_LO_mw.get_instr().power.set(self.mw_pow_td_source.get())
MW_LutMan = self.instr_LutMan_MW.get_instr()
if self.instr_spec_source() is not None:
self.instr_spec_source.get_instr().off()
self.instr_LO_mw.get_instr().on()
self.instr_LO_mw.get_instr().pulsemod_state(False)
if MW_LutMan.cfg_sideband_mode() == 'static':
# Set source to fs =f-f_mod such that pulses appear at f = fs+f_mod
self.instr_LO_mw.get_instr().frequency.set(
self.freq_qubit.get() - self.mw_freq_mod.get())
elif MW_LutMan.cfg_sideband_mode() == 'real-time':
# For historic reasons, will maintain the change qubit frequency here in
# _prep_td_sources, even for real-time mode, where it is only changed in the HDAWG
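# Assumption behind the check below: HDAWG output channels come in pairs
# (1, 2), (3, 4), ... that share a single oscillator group, hence
# (channel - 1)//2 must be identical for the I and Q channels.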
if ((MW_LutMan.channel_I()-1)//2 != (MW_LutMan.channel_Q()-1)//2):
raise KeyError('In real-time sideband mode, channel I/Q should share same awg group.')
self.mw_freq_mod(self.freq_qubit.get() - self.instr_LO_mw.get_instr().frequency.get())
MW_LutMan.AWG.get_instr().set('oscs_{}_freq'.format((MW_LutMan.channel_I()-1)//2),
self.mw_freq_mod.get())
else:
raise ValueError('Unexpected value for parameter cfg_sideband_mode.')
self.instr_LO_mw.get_instr().power.set(self.mw_pow_td_source.get())
def _prep_mw_pulses(self):
# 1. Gets instruments and prepares cases
MW_LutMan = self.instr_LutMan_MW.get_instr()
AWG = MW_LutMan.AWG.get_instr()
# 2. Prepares map and parameters for waveforms
# (except pi-pulse amp, which depends on VSM usage)
MW_LutMan.mw_amp90_scale(self.mw_amp90_scale())
MW_LutMan.mw_gauss_width(self.mw_gauss_width())
MW_LutMan.channel_amp(self.mw_channel_amp())
MW_LutMan.channel_range(self.mw_channel_range())
MW_LutMan.mw_motzoi(self.mw_motzoi())
MW_LutMan.mw_modulation(self.mw_freq_mod())
MW_LutMan.spec_amp(self.spec_amp())
# used for ef pulsing
MW_LutMan.mw_ef_amp180(self.mw_ef_amp())
# MW_LutMan.mw_ef_modulation(MW_LutMan.mw_modulation() +
# self.anharmonicity())
if MW_LutMan.cfg_sideband_mode() != 'real-time':
MW_LutMan.mw_ef_modulation(MW_LutMan.mw_modulation() +
self.anharmonicity())
else:
MW_LutMan.mw_ef_modulation(self.anharmonicity())
# 3. Does case-dependent things:
# mixers offset+skewness
# pi-pulse amplitude
if self.cfg_with_vsm():
# case with VSM (both QWG and AWG8)
MW_LutMan.mw_amp180(self.mw_amp180())
MW_LutMan.G_mixer_phi(self.mw_G_mixer_phi())
MW_LutMan.G_mixer_alpha(self.mw_G_mixer_alpha())
MW_LutMan.D_mixer_phi(self.mw_D_mixer_phi())
MW_LutMan.D_mixer_alpha(self.mw_D_mixer_alpha())
MW_LutMan.channel_GI(0+self.mw_awg_ch())
MW_LutMan.channel_GQ(1+self.mw_awg_ch())
MW_LutMan.channel_DI(2+self.mw_awg_ch())
MW_LutMan.channel_DQ(3+self.mw_awg_ch())
if self._using_QWG():
# N.B. This part is QWG specific
if hasattr(MW_LutMan, 'channel_GI'):
# 4-channels are used for VSM based AWG's.
AWG.ch1_offset(self.mw_mixer_offs_GI())
AWG.ch2_offset(self.mw_mixer_offs_GQ())
AWG.ch3_offset(self.mw_mixer_offs_DI())
AWG.ch4_offset(self.mw_mixer_offs_DQ())
else: # using_AWG8
# N.B. This part is AWG8 specific
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()-1),
self.mw_mixer_offs_GI())
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+0),
self.mw_mixer_offs_GQ())
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+1),
self.mw_mixer_offs_DI())
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+2),
self.mw_mixer_offs_DQ())
else:
if self._using_QWG():
# case without VSM and with QWG
if ((self.mw_G_mixer_phi() != self.mw_D_mixer_phi())
or (self.mw_G_mixer_alpha() != self.mw_D_mixer_alpha())):
logging.warning('CCL_Transmon {}; _prep_mw_pulses: '
'no VSM detected, using mixer parameters'
' from gaussian channel.'.format(self.name))
MW_LutMan.mixer_phi(self.mw_G_mixer_phi())
MW_LutMan.mixer_alpha(self.mw_G_mixer_alpha())
AWG.set('ch{}_offset'.format(MW_LutMan.channel_I()),
self.mw_mixer_offs_GI())
AWG.set('ch{}_offset'.format(MW_LutMan.channel_Q()),
self.mw_mixer_offs_GQ())
else:
# case without VSM (and AWG8)
MW_LutMan.mw_amp180(1)
MW_LutMan.mixer_phi(self.mw_G_mixer_phi())
MW_LutMan.mixer_alpha(self.mw_G_mixer_alpha())
# N.B. This part is AWG8 specific
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()-1),
self.mw_mixer_offs_GI())
AWG.set('sigouts_{}_offset'.format(self.mw_awg_ch()+0),
self.mw_mixer_offs_GQ())
# 4. reloads the waveforms
if self.cfg_prepare_mw_awg():
MW_LutMan.load_waveforms_onto_AWG_lookuptable()
else:
warnings.warn('"cfg_prepare_mw_awg" set to False, '
'not preparing microwave pulses.')
# 5. upload commandtable for virtual-phase gates
MW_LutMan.upload_single_qubit_phase_corrections() # FIXME: assumes AWG8_MW_LutMan
def _prep_td_configure_VSM(self):
# Configure VSM
VSM = self.instr_VSM.get_instr()
VSM.set('ch{}_frequency'.format(
self.mw_vsm_ch_in()), self.freq_qubit())
for mod in range(1, 9):
VSM.set('mod{}_ch{}_marker_state'.format(
mod, self.spec_vsm_ch_in()), 'off')
VSM.set('mod{}_ch{}_marker_state'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), 'on')
VSM.set('mod{}_marker_source'.format(
self.mw_vsm_mod_out()), self.mw_vsm_marker_source())
VSM.set('mod{}_ch{}_derivative_amp'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_D_amp())
VSM.set('mod{}_ch{}_derivative_phase'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_D_phase())
VSM.set('mod{}_ch{}_gaussian_amp'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_G_amp())
VSM.set('mod{}_ch{}_gaussian_phase'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), self.mw_vsm_G_phase())
self.instr_CC.get_instr().set(
'vsm_channel_delay{}'.format(self.cfg_qubit_nr()),
self.mw_vsm_delay())
def _prep_cw_configure_VSM(self):
# Configure VSM
VSM = self.instr_VSM.get_instr()
for mod in range(1, 9):
VSM.set('mod{}_ch{}_marker_state'.format(
mod, self.mw_vsm_ch_in()), 'off')
VSM.set('mod{}_ch{}_marker_state'.format(
self.mw_vsm_mod_out(), self.spec_vsm_ch_in()), 'on')
VSM.set('mod{}_marker_source'.format(
self.mw_vsm_mod_out()), self.mw_vsm_marker_source())
##########################################################################
# find_ functions (CCLight_Transmon specific)
##########################################################################
def find_frequency_adaptive(self, f_start=None, f_span=1e9, f_step=0.5e6,
MC=None, update=True, use_max=False,
spec_mode='pulsed_marked', verbose=True):
"""
'Adaptive' measurement for finding the qubit frequency. Will look with
a range of the current frequency estimate, and if it does not find a
peak it will move and look f_span Hz above and below the estimate. Will
continue to do such a shift until a peak is found.
"""
if MC is None:
MC = self.instr_MC.get_instr()
if f_start is None:
f_start = self.freq_qubit()
# Set high power and averages to be sure we find the peak.
# self.spec_pow(-30)
# self.ro_pulse_amp_CW(0.025)
# old_avg = self.ro_acq_averages()
# self.ro_acq_averages(2**15)
# Repeat measurement while no peak is found:
success = False
f_center = f_start
n = 0
while not success:
success = None
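# Alternating, expanding search around the starting frequency: successive
# iterations probe f_start, f_start - f_span, f_start + f_span,
# f_start - 2*f_span, ... until a qubit peak is found.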
f_center += f_span*n*(-1)**n
n += 1
if verbose:
cfreq, cunit = plt_tools.SI_val_to_msg_str(
f_center, 'Hz', float)
sfreq, sunit = plt_tools.SI_val_to_msg_str(f_span, 'Hz', float)
print('Doing adaptive spectroscopy around {:.3f} {} with a '
'span of {:.0f} {}.'.format(cfreq, cunit, sfreq, sunit))
freqs = np.arange(f_center - f_span/2, f_center + f_span/2, f_step)
self.measure_spectroscopy(MC=MC, freqs=freqs, mode=spec_mode,
analyze=False)
label = 'spec'
# Use 'try' because it can give a TypeError when no peak is found
try:
analysis_spec = ma.Qubit_Spectroscopy_Analysis(label=label,
close_fig=True,
qb_name=self.name)
except TypeError:
logging.warning('TypeError in Adaptive spectroscopy')
continue
# Check for peak and check its height
freq_peak = analysis_spec.peaks['peak']
offset = analysis_spec.fit_res.params['offset'].value
peak_height = np.amax(analysis_spec.data_dist)
# Check if peak is not another qubit, and if it is move that qubit away
for qubit_name in self.instr_device.get_instr().qubits():
qubit = self.instr_device.get_instr().find_instrument(qubit_name)
if qubit.name != self.name and qubit.freq_qubit() is not None:
if np.abs(qubit.freq_qubit()-freq_peak) < 5e6:
if verbose:
logging.warning('Peak found at frequency of {}. '
'Adjusting currents'
.format(qubit.name))
fluxcurrent = self.instr_FluxCtrl.get_instr()
old_current = fluxcurrent[qubit.fl_dc_ch()]()
fluxcurrent[qubit.fl_dc_ch()](5e-3)
n -= 1
success = False
if success is None:
if freq_peak is None:
success = False
elif peak_height < 4*offset:
success = False
elif peak_height < 3*np.mean(analysis_spec.data_dist):
success = False
else:
success = True
# self.ro_acq_averages(old_avg)
if update:
if use_max:
self.freq_qubit(analysis_spec.peaks['peak'])
else:
self.freq_qubit(analysis_spec.fitted_freq)
return True
def find_qubit_sweetspot(self, freqs=None, dac_values=None, update=True,
set_to_sweetspot=True, method='DAC', fluxChan=None,
spec_mode='pulsed_marked'):
"""
Should be edited such that it contains reference to different measurement
methods (tracking / 2D scan / broad spectroscopy)
method = 'DAC' - uses ordinary 2D DAC scan
'tracked - uses tracked spectroscopy (not really implemented)'
TODO: If spectroscopy does not yield a peak, it should discard it
"""
if freqs is None:
freq_center = self.freq_qubit()
freq_range = 50e6
freqs = np.arange(freq_center - freq_range, freq_center + freq_range,
1e6)
if dac_values is None:
if self.fl_dc_I0() is not None:
dac_values = np.linspace(self.fl_dc_I0() - 1e-3,
self.fl_dc_I0() + 1e-3, 8)
else:
dac_values = np.linspace(-0.5e-3, 0.5e-3, 10)
if fluxChan is None:
if self.fl_dc_ch() is not None:
fluxChan = self.fl_dc_ch()
else:
logging.error('No fluxchannel found or specified. Please '
'specify fluxChan')
if method == 'DAC':
t_start = time.strftime('%Y%m%d_%H%M%S')
self.measure_qubit_frequency_dac_scan(freqs=freqs,
dac_values=dac_values,
fluxChan=fluxChan,
analyze=False,
mode=spec_mode,
nested_resonator_calibration=False,
# nested_resonator_calibration_use_min=False,
resonator_freqs=np.arange(-5e6, 5e6, 0.2e6)+self.freq_res())
timestamp = a_tools.get_timestamps_in_range(t_start,
label='Qubit_dac_scan' +
self.msmt_suffix)
timestamp = timestamp[0]
a = ma2.da.DAC_analysis(timestamp=timestamp)
self.fl_dc_polycoeff(a.dac_fit_res['fit_polycoeffs'])
sweetspot_current = a.dac_fit_res['sweetspot_dac']
elif method == 'tracked':
t_start = time.strftime('%Y%m%d_%H%M%S')
for i, dac_value in enumerate(dac_values):
self.instr_FluxCtrl.get_instr()[self.fl_dc_ch()](dac_value)
if i == 0:
self.find_frequency(freqs=freqs, update=True)
else:
self.find_frequency(update=True)
t_end = time.strftime('%Y%m%d_%H%M%S')
a = ma2.DACarcPolyFit(t_start=t_start, t_stop=t_end,
label='spectroscopy__' + self.name,
dac_key='Instrument settings.fluxcurrent.'+self.fl_dc_ch(),
degree=2)
pc = a.fit_res['fit_polycoeffs']
self.fl_dc_polycoeff(pc)
sweetspot_current = -pc[1]/(2*pc[0])
else:
logging.error('Sweetspot method {} unknown. '
'Use "DAC" or "tracked".'.format(method))
if update:
self.fl_dc_I0(sweetspot_current)
self.freq_max(self.calc_current_to_freq(sweetspot_current))
if set_to_sweetspot:
self.instr_FluxCtrl.get_instr()[self.fl_dc_ch()](sweetspot_current)
# Sanity check: does this peak move with flux?
check_vals = [self.calc_current_to_freq(np.min(dac_values)),
self.calc_current_to_freq(self.fl_dc_I0()),
self.calc_current_to_freq(np.max(dac_values))]
if check_vals[0] == pytest.approx(check_vals[1], abs=0.5e6):
if check_vals[0] == pytest.approx(check_vals[2], abs=0.5e6):
if check_vals[1] == pytest.approx(check_vals[2], abs=0.5e6):
logging.warning('No qubit shift found with varying flux. '
'Peak is not a qubit')
return False
if self.fl_dc_polycoeff()[1] < 1e6 and self.fl_dc_polycoeff()[2] < 1e6:
logging.warning('No qubit shift found with varying flux. Peak is '
'not a qubit')
return False
return True
def find_qubit_sweetspot_1D(self, freqs=None, dac_values=None):
# self.spec_pow(-30)
self.ro_acq_averages(2**14)
if dac_values is None:
if self.fl_dc_I0() is not None:
dac_values = np.linspace(self.fl_dc_I0() - 1e-3,
self.fl_dc_I0() + 1e-3, 8)
else:
dac_values = np.linspace(-1e-3, 1e-3, 8)
if freqs is None:
freq_center = self.freq_qubit()
freq_range = 50e6
freqs = np.arange(freq_center - freq_range, freq_center + freq_range,
0.5e6)
Qubit_frequency = []
Reson_frequency = []
flux_channel = self.fl_dc_ch()
for dac_value in dac_values:
# Set Flux Current
self.instr_FluxCtrl.get_instr()[flux_channel](dac_value)
# Find Resonator
self.find_resonator_frequency(freqs=np.arange(-5e6, 5.1e6, .1e6)+self.freq_res(),
use_min=True)
# Find Qubit frequency
self.find_frequency(freqs=freqs)
Qubit_frequency.append(self.freq_qubit())
Reson_frequency.append(self.freq_res())
# Fit sweetspot with second degree polyfit
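# For a parabola f(I) = a*I**2 + b*I + c the extremum (sweet spot) lies at
# I = -b/(2*a); np.polyfit returns the coefficients as [a, b, c].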
fit_coefs = np.polyfit(dac_values, Qubit_frequency, deg=2)
sweetspot_current = -fit_coefs[1]/(2*fit_coefs[0])
# Set Flux Current to sweetspot
self.instr_FluxCtrl.get_instr()[flux_channel](sweetspot_current)
self.find_resonator_frequency(freqs=np.arange(-5e6, 5.1e6, .1e6)+self.freq_res(),
use_min=True)
frequency_sweet_spot = self.find_frequency(
freqs=np.arange(-50e6, 50e6, .5e6)+self.freq_qubit())
return frequency_sweet_spot
def find_anharmonicity_estimate(self, freqs=None, anharmonicity=None,
mode='pulsed_marked', update=True, power_12=10):
"""
Finds an estimate of the anharmonicity by doing a spectroscopy around
150 MHz below the qubit frequency.
TODO: if spec_pow is too low/high, it should adjust it to approx the
ideal spec_pow + 25 dBm
"""
if anharmonicity is None:
# Standard estimate, negative by convention
anharmonicity = self.anharmonicity()
f02_estimate = self.freq_qubit()*2 + anharmonicity
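# f02 = 2*f01 + anharmonicity; the spectroscopy below is centred around
# f02/2, where the two-photon 0->2 transition appears, roughly
# anharmonicity/2 (~150 MHz) below the 01 transition.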
if freqs is None:
freq_center = f02_estimate/2
freq_range = 175e6
freqs = np.arange(freq_center-1/2*freq_range, self.freq_qubit()+1/2*freq_range,
0.5e6)
old_spec_pow = self.spec_pow()
self.spec_pow(self.spec_pow()+power_12)
self.measure_spectroscopy(freqs=freqs, mode=mode, analyze=False)
a = ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix,
analyze_ef=True)
self.spec_pow(old_spec_pow)
f02 = 2*a.params['f0_gf_over_2'].value
if update:
self.anharmonicity(f02-2*self.freq_qubit())
return True
def find_bus_frequency(self, freqs, spec_source_bus, bus_power, f01=None,
label='', close_fig=True, analyze=True, MC=None,
prepare_for_continuous_wave=True):
"""
Drive the qubit and sit at the spectroscopy peak while the bus is driven with
bus_spec_source
Args:
freqs (array):
list of frequencies of the second drive tone (at bus frequency)
spec_source_bus (RohdeSchwarz_SGS100A):
rf source used for the second spectroscopy tone
bus_power (float):
power of the second spectroscopy tone
f01 (float):
frequency of 01 transition (default: self.freq_qubit())
analyze (bool):
indicates whether to look for peaks in the data and perform a fit
label (str):
suffix to append to the measurement label
prepare_for_continuous_wave (bool):
indicates whether to regenerate a waveform
generating a readout tone and set all the instruments according
to the parameters stored in the qubit object
"""
if f01 is None:
f01 = self.freq_qubit()
UHFQC = self.instr_acquisition.get_instr()
if prepare_for_continuous_wave:
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Starting specmode if set in config
if self.cfg_spec_mode():
UHFQC.spec_mode_on(IF=self.ro_freq_mod(),
ro_amp=self.ro_pulse_amp_CW())
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
spec_source = self.instr_spec_source.get_instr()
spec_source.on()
spec_source.frequency(f01)
# spec_source.power(self.spec_pow())
spec_source_bus.on()
spec_source_bus.power(bus_power)
MC.set_sweep_function(spec_source_bus.frequency)
MC.set_sweep_points(freqs)
if self.cfg_spec_mode():
print('Enter loop')
MC.set_detector_function(self.UHFQC_spec_det)
else:
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Bus_spectroscopy_'+self.msmt_suffix+label)
spec_source_bus.off()
# Stopping specmode
if self.cfg_spec_mode():
UHFQC.spec_mode_off()
self._prep_ro_pulse(upload=True)
if analyze:
ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix,
close_fig=close_fig,
qb_name=self.name)
##########################################################################
# calibrate_ functions (CCLight_Transmon specific)
##########################################################################
def calibrate_ro_pulse_amp_CW(self, freqs=None, powers=None, update=True):
"""
Does a resonator power scan and determines at which power the low power
regime is exited. If update=True, will set the readout power to this
power.
"""
if freqs is None:
freq_center = self.freq_res()
freq_range = 10e6
freqs = np.arange(freq_center - freq_range/2,
freq_center + freq_range/2,
0.1e6)
if powers is None:
powers = np.arange(-40, 0.1, 8)
self.measure_resonator_power(freqs=freqs, powers=powers, analyze=False)
fit_res = ma.Resonator_Powerscan_Analysis(label='Resonator_power_scan',
close_fig=True)
if update:
ro_pow = 10**(fit_res.power/20)
self.ro_pulse_amp_CW(ro_pow)
self.ro_pulse_amp(ro_pow)
self.freq_res(fit_res.f_low)
if self.freq_qubit() is None:
f_qubit_estimate = self.freq_res() + (65e6)**2/(fit_res.shift)
                logging.info('No qubit frequency found. Updating with RWA to {}'
.format(f_qubit_estimate))
self.freq_qubit(f_qubit_estimate)
return True
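    # Illustrative note on the two conversions used above:
    # 1) the fitted power (in dB) is mapped to a pulse amplitude via
    #    10**(P/20), e.g. P = -20 dB gives an amplitude of 0.1;
    # 2) the qubit-frequency guess inverts the dispersive (RWA) relation
    #    shift ~ g**2 / (f_qubit - f_res) to f_qubit ~ f_res + g**2 / shift,
    #    with an assumed coupling g = 65 MHz (the hard-coded 65e6 above).
    # Both are rough estimates rather than calibrated values.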
def calibrate_mw_pulse_amplitude_coarse(self,
amps=None,
close_fig=True, verbose=False,
MC=None, update=True,
all_modules=False):
"""
Calibrates the pulse amplitude using a single rabi oscillation.
Depending on self.cfg_with_vsm uses VSM or AWG channel amplitude
to sweep the amplitude of the pi pulse
For details see self.measure_rabi
"""
if amps is None:
if self.cfg_with_vsm():
amps = np.linspace(0.1, 1, 31)
else:
amps = np.linspace(0, 1, 31)
self.measure_rabi(amps=amps, MC=MC, analyze=False,
all_modules=all_modules)
a = ma.Rabi_Analysis(close_fig=close_fig, label='rabi')
try:
if self.cfg_with_vsm():
self.mw_vsm_G_amp(a.rabi_amplitudes['piPulse'])
else:
self.mw_channel_amp(a.rabi_amplitudes['piPulse'])
        except ValueError:
warnings.warn("Extracted piPulse amplitude out of parameter range. "
"Keeping previous value.")
return True
# FIXME: code contains errors
# def calibrate_mw_pulse_amplitude_coarse_test(self,
# amps=None,
# close_fig=True, verbose=False,
# MC=None, update=True,
# all_modules=False):
# """
# Calibrates the pulse amplitude using a single rabi oscillation.
# Depending on self.cfg_with_vsm uses VSM or AWG channel amplitude
# to sweep the amplitude of the pi pulse
#
# For details see self.measure_rabi
# """
# self.ro_acq_averages(2**10)
# self.ro_soft_avg(3)
# # self.mw_gauss_width(10e-9)
# # self.mw_pulse_duration()=4*self.mw_gauss_width()
# if amps is None:
# if self.cfg_with_vsm():
# amps = np.linspace(0.1, 1, 31)
# else:
# amps = np.linspace(0, 1, 31)
#
# self.measure_rabi(amps=amps, MC=MC, analyze=False,
# all_modules=all_modules)
# a = ma.Rabi_Analysis(close_fig=close_fig, label='rabi')
# old_gw = self.mw_gauss_width()
# if a.rabi_amplitudes['piPulse'] > 1 or a.rabi_amplitudes['piHalfPulse'] > a.rabi_amplitudes['piPulse']:
# self.mw_gauss_width(2*old_gw)
# self.prepare_for_timedomain()
# mw_lutman.load_waveforms_onto_AWG_lookuptable(
# force_load_sequencer_program=False)
#
# try:
# if self.cfg_with_vsm():
# self.mw_vsm_G_amp(a.rabi_amplitudes['piPulse'])
# else:
# self.mw_channel_amp(a.rabi_amplitudes['piPulse'])
# except(ValueError):
# warnings.warn("Extracted piPulse amplitude out of parameter range. "
# "Keeping previous value.")
# return True
def calibrate_mw_vsm_delay(self):
"""
Uploads a sequence for calibrating the vsm delay.
The experiment consists of a single square pulse of 20 ns that
triggers both the VSM channel specified and the AWG8.
Note: there are two VSM markers, align with the first of two.
By changing the "mw_vsm_delay" parameter the delay can be calibrated.
N.B. Ensure that the signal is visible on a scope or in the UFHQC
readout first!
"""
self.prepare_for_timedomain()
CCL = self.instr_CC.get_instr()
CCL.stop()
p = sqo.vsm_timing_cal_sequence(
qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
CCL.start()
print('CCL program is running. Parameter "mw_vsm_delay" can now be '
'calibrated by hand.')
def calibrate_mixer_skewness_drive(self, MC=None,
mixer_channels: list = ['G', 'D'],
x0: list = [1.0, 0.0],
cma_stds: list = [.15, 10],
maxfevals: int = 250,
update: bool = True)-> bool:
"""
Calibrates the mixer skewness and updates values in the qubit object.
Args:
MC (MeasurementControl):
instance of Measurement Control
mixer_channels (list):
list of strings indicating what channels to
calibrate. In VSM case 'G' and/or 'D' can be specified.
                In the no-VSM case mixer_channels is always set to ['G'].
update (bool):
if True updates values in the qubit object.
Return:
success (bool):
                returns True if successful. Currently always
returns True (i.e., no sanity check implemented)
"""
# turn relevant channels on
        if MC is None:
MC = self.instr_MC.get_instr()
# Load the sequence
CCL = self.instr_CC.get_instr()
p = sqo.CW_tone(
qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
CCL.start()
if self.cfg_with_vsm():
# Open the VSM channel
VSM = self.instr_VSM.get_instr()
ch_in = self.mw_vsm_ch_in()
            # module 8 is hardcoded for use in mixer calibrations (signal hound)
VSM.set('mod8_marker_source'.format(ch_in), 'int')
VSM.set('mod8_ch{}_marker_state'.format(ch_in), 'on')
VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 1.0)
VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 1.0)
else:
mixer_channels = ['G']
mw_lutman = self.instr_LutMan_MW.get_instr()
mw_lutman.mixer_apply_predistortion_matrix(True)
# # Define the parameters that will be varied
for mixer_ch in mixer_channels:
if self.cfg_with_vsm():
alpha = mw_lutman.parameters['{}_mixer_alpha'.format(mixer_ch)]
phi = mw_lutman.parameters['{}_mixer_phi'.format(mixer_ch)]
if mixer_ch == 'G':
mw_lutman.sq_G_amp(.5)
mw_lutman.sq_D_amp(0)
elif mixer_ch == 'D':
mw_lutman.sq_G_amp(0)
mw_lutman.sq_D_amp(.5)
else:
alpha = mw_lutman.parameters['mixer_alpha']
phi = mw_lutman.parameters['mixer_phi']
mw_lutman.sq_amp(.5)
spurious_sideband_freq = self.freq_qubit() - 2*self.mw_freq_mod()
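            # Illustrative note: assuming upper-sideband upconversion
            # (f_qubit = f_LO + mw_freq_mod), the image to be minimized sits at
            # f_LO - mw_freq_mod = f_qubit - 2*mw_freq_mod, which is the
            # frequency computed above and monitored on the signal hound below.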
# This is to ensure the square waveform is pulse 10!
mw_lutman.set_default_lutmap()
if self._using_QWG():
prepare_function = mw_lutman.apply_mixer_predistortion_corrections
prepare_function_kwargs = {'wave_dict': {}}
else:
def load_square():
AWG = mw_lutman.AWG.get_instr()
AWG.stop()
# When using real-time modulation, mixer_alpha is encoded in channel amplitudes.
# Loading amplitude ensures new amplitude will be calculated with mixer_alpha.
if mw_lutman.cfg_sideband_mode() == 'real-time':
mw_lutman._set_channel_amp(mw_lutman._get_channel_amp())
                    # Codeword 10 is hardcoded in the generated CCL config
# mw_lutman.load_waveform_realtime(wave_id='square')
mw_lutman.load_waveforms_onto_AWG_lookuptable(
force_load_sequencer_program=False)
AWG.start()
prepare_function = load_square
prepare_function_kwargs = {}
detector = det.Signal_Hound_fixed_frequency(
self.instr_SH.get_instr(), spurious_sideband_freq,
prepare_for_each_point=True,
Navg=5,
prepare_function=prepare_function,
prepare_function_kwargs=prepare_function_kwargs)
# mw_lutman.load_waveform_realtime,
# prepare_function_kwargs={'waveform_key': 'square', 'wf_nr': 10})
ad_func_pars = {'adaptive_function': cma.fmin,
'x0': x0,
'sigma0': 1,
'minimize': True,
'noise_handler': cma.NoiseHandler(N=2),
'options': {'cma_stds': cma_stds,
'maxfevals': maxfevals}} # Should be enough for mixer skew
MC.set_sweep_functions([alpha, phi])
#MC.set_sweep_function(alpha)
MC.set_detector_function(detector) # sets test_detector
MC.set_adaptive_function_parameters(ad_func_pars)
MC.set_sweep_points(np.linspace(0,2,300))
MC.run(
name='Spurious_sideband_{}{}'.format(
mixer_ch, self.msmt_suffix),
mode='adaptive')
# For the figure
ma.OptimizationAnalysis_v2()
a = ma.OptimizationAnalysis(auto=True, label='Spurious_sideband')
alpha = a.optimization_result[0][0]
phi = a.optimization_result[0][1]
if update:
self.set('mw_{}_mixer_alpha'.format(mixer_ch), alpha)
self.set('mw_{}_mixer_phi'.format(mixer_ch), phi)
return True
# def calibrate_mixer_skewness_RO(self, update=True):
# """
# Calibrates the mixer skewness using mixer_skewness_cal_UHFQC_adaptive
# see calibration toolbox for details
# Args:
# update (bool):
# if True updates values in the qubit object.
# Return:
# success (bool):
# returns True if succesful. Currently always
# returns True (i.e., no sanity check implemented)
# """
# # using the restless tuning sequence
# self.prepare_for_timedomain()
# p = sqo.randomized_benchmarking(
# self.cfg_qubit_nr(), self.cfg_openql_platform_fn(),
# nr_cliffords=[1],
# net_clifford=1, nr_seeds=1, restless=True, cal_points=False)
# self.instr_CC.get_instr().eqasm_program(p.filename)
# self.instr_CC.get_instr().start()
# LutMan = self.instr_LutMan_RO.get_instr()
# LutMan.mixer_apply_predistortion_matrix(True)
# MC = self.instr_MC.get_instr()
# S1 = swf.lutman_par_UHFQC_dig_trig(
# LutMan, LutMan.mixer_alpha, single=False, run=True)
# S2 = swf.lutman_par_UHFQC_dig_trig(
# LutMan, LutMan.mixer_phi, single=False, run=True)
# detector = det.Signal_Hound_fixed_frequency(
# self.instr_SH.get_instr(), frequency=(self.instr_LO_ro.get_instr().frequency() -
# self.ro_freq_mod()),
# Navg=5, delay=0.0, prepare_for_each_point=False)
# ad_func_pars = {'adaptive_function': nelder_mead,
# 'x0': [1.0, 0.0],
# 'initial_step': [.15, 10],
# 'no_improv_break': 15,
# 'minimize': True,
# 'maxiter': 500}
# MC.set_sweep_functions([S1, S2])
# MC.set_detector_function(detector) # sets test_detector
# MC.set_adaptive_function_parameters(ad_func_pars)
# MC.run(name='Spurious_sideband', mode='adaptive')
# a = ma.OptimizationAnalysis(auto=True, label='Spurious_sideband')
# alpha = a.optimization_result[0][0]
# phi = a.optimization_result[0][1]
# if update:
# self.ro_pulse_mixer_phi.set(phi)
# self.ro_pulse_mixer_alpha.set(alpha)
# LutMan.mixer_alpha(alpha)
# LutMan.mixer_phi(phi)
def calibrate_mixer_skewness_RO(self, update=True):
"""
Calibrates the mixer skewness using mixer_skewness_cal_UHFQC_adaptive
see calibration toolbox for details
Args:
update (bool):
if True updates values in the qubit object.
Return:
success (bool):
                returns True if successful. Currently always
returns True (i.e., no sanity check implemented)
"""
CCL = self.instr_CC.get_instr()
p = sqo.CW_RO_sequence(
qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
CCL.start()
# using the restless tuning sequence
# self.prepare_for_timedomain()
# p = sqo.randomized_benchmarking(
# self.cfg_qubit_nr(), self.cfg_openql_platform_fn(),
# nr_cliffords=[1],
# net_clifford=1, nr_seeds=1, restless=True, cal_points=False)
# self.instr_CC.get_instr().eqasm_program(p.filename)
# self.instr_CC.get_instr().start()
LutMan = self.instr_LutMan_RO.get_instr()
LutMan.mixer_apply_predistortion_matrix(True)
MC = self.instr_MC.get_instr()
S1 = swf.lutman_par_UHFQC_dig_trig(
LutMan, LutMan.mixer_alpha, single=False, run=True)
S2 = swf.lutman_par_UHFQC_dig_trig(
LutMan, LutMan.mixer_phi, single=False, run=True)
detector = det.Signal_Hound_fixed_frequency(
self.instr_SH.get_instr(),
frequency=self.ro_freq() - 2*self.ro_freq_mod(),
Navg=5, delay=0.0,
prepare_for_each_point=False)
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': [1.0, 0.0],
'initial_step': [.15, 10],
'no_improv_break': 15,
'minimize': True,
'maxiter': 500}
MC.set_sweep_functions([S1, S2])
MC.set_detector_function(detector) # sets test_detector
MC.set_adaptive_function_parameters(ad_func_pars)
MC.run(name='Spurious_sideband', mode='adaptive')
a = ma.OptimizationAnalysis(auto=True, label='Spurious_sideband')
alpha = a.optimization_result[0][0]
phi = a.optimization_result[0][1]
if update:
self.ro_pulse_mixer_phi.set(phi)
self.ro_pulse_mixer_alpha.set(alpha)
LutMan.mixer_alpha(alpha)
LutMan.mixer_phi(phi)
def calibrate_mixer_offsets_RO(self, update: bool = True,
ftarget=-110) -> bool:
"""
Calibrates the mixer offset and updates the I and Q offsets in
the qubit object.
Args:
update (bool):
if True updates values in the qubit object.
ftarget (float): power of the signal at the LO frequency
for which the optimization is terminated
Return:
success (bool):
                returns True if successful. Currently always
returns True (i.e., no sanity check implemented)
"""
chI_par = self.instr_acquisition.get_instr().sigouts_0_offset
chQ_par = self.instr_acquisition.get_instr().sigouts_1_offset
offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation(
SH=self.instr_SH.get_instr(),
source=self.instr_LO_ro.get_instr(),
MC=self.instr_MC.get_instr(),
chI_par=chI_par,
chQ_par=chQ_par,
x0=(0.05, 0.05),
ftarget=ftarget)
if update:
self.ro_pulse_mixer_offs_I(offset_I)
self.ro_pulse_mixer_offs_Q(offset_Q)
return True
def calibrate_mw_pulses_basic(self,
cal_steps=['offsets', 'amp_coarse', 'freq',
'drag', 'amp_fine', 'amp_fine',
'amp_fine'],
kw_freqs={'steps': [1, 3, 10, 30, 100,
300, 1000]},
kw_amp_coarse={'amps': np.linspace(0, 1, 31)},
kw_amp_fine={'update': True},
soft_avg_allxy=3,
kw_offsets={'ftarget': -120},
kw_skewness={},
kw_motzoi={'update': True},
f_target_skewness=-120):
"""
Performs a standard calibration of microwave pulses consisting of
- mixer offsets
- mixer skewness
- pulse ampl coarse (rabi)
- frequency (ramsey)
- motzoi
- ampl fine (flipping)
- AllXY (to verify)
        Note that this is a basic calibration: it does not involve fine tuning
        to ~99.9% fidelity and only works if the qubit is well behaved.
"""
for this_step in cal_steps:
if this_step == 'offsets':
self.calibrate_mixer_offsets_drive(**kw_offsets)
elif this_step == 'skewness':
self.calibrate_mixer_skewness_drive(**kw_skewness)
elif this_step == 'amp_coarse':
self.calibrate_mw_pulse_amplitude_coarse(**kw_amp_coarse)
elif this_step == 'freq':
self.find_frequency('ramsey', **kw_freqs)
elif this_step == 'drag':
self.calibrate_motzoi(**kw_motzoi)
elif this_step == 'amp_fine':
self.measure_flipping(**kw_amp_fine)
old_soft_avg = self.ro_soft_avg()
self.ro_soft_avg(soft_avg_allxy)
self.measure_allxy()
self.ro_soft_avg(old_soft_avg)
return True
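    # Example usage (illustrative sketch; parameter values are placeholders):
    #   q0.calibrate_mw_pulses_basic(
    #       cal_steps=['offsets', 'amp_coarse', 'freq', 'drag', 'amp_fine'],
    #       kw_offsets={'ftarget': -120},
    #       kw_amp_coarse={'amps': np.linspace(0, 1, 31)})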
def calibrate_ssro_coarse(self, MC=None,
nested_MC=None,
freqs=None,
amps=None,
analyze: bool = True,
update: bool = True):
'''
Performs a 2D sweep of <qubit>.ro_freq and <qubit>.ro_pulse_amp and
measures SSRO parameters (SNR, F_a, F_d).
After the sweep is done, it sets the parameters for which the assignment
fidelity was maximum.
Args:
            freqs (array):
                Range of frequencies of the sweep.
            amps (array):
                Range of amplitudes of the sweep.
'''
if MC is None:
MC = self.instr_MC.get_instr()
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if freqs is None:
if self.dispersive_shift() is not None:
freqs = np.arange(-2*abs(self.dispersive_shift()),
abs(self.dispersive_shift()), .5e6) + self.freq_res()
else:
                raise ValueError('self.dispersive_shift is None. Please specify '
                                 'a range of sweep frequencies.')
if amps is None:
amps = np.linspace(.001, .5, 31)
ro_lm = self.find_instrument(self.instr_LutMan_RO())
q_idx = self.cfg_qubit_nr()
swf1 = swf.RO_freq_sweep(name='RO frequency',
qubit=self,
ro_lutman=ro_lm,
idx=q_idx,
parameter=self.ro_freq)
nested_MC.set_sweep_function(swf1)
nested_MC.set_sweep_points(freqs)
nested_MC.set_sweep_function_2D(self.ro_pulse_amp)
nested_MC.set_sweep_points_2D(amps)
d = det.Function_Detector(self.measure_ssro,
result_keys=['SNR', 'F_a', 'F_d'],
value_names=['SNR', 'F_a', 'F_d'],
value_units=['a.u.', 'a.u.', 'a.u.'],
msmt_kw={'prepare': False}
)
nested_MC.set_detector_function(d)
nested_MC.run(name='RO_coarse_tuneup', mode='2D')
if analyze is True:
# Analysis
a = ma.TwoD_Analysis(label='RO_coarse_tuneup', auto=False)
# Get best parameters
a.get_naming_and_values_2D()
arg = np.argmax(a.measured_values[1])
index = np.unravel_index(arg, (len(a.sweep_points),
len(a.sweep_points_2D)))
best_freq = a.sweep_points[index[0]]
best_amp = a.sweep_points_2D[index[1]]
a.run_default_analysis()
print('Frequency: {}, Amplitude: {}'.format(best_freq, best_amp))
if update is True:
self.ro_freq(best_freq)
self.ro_pulse_amp(best_amp)
return True
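    # Illustrative note: the argmax/unravel_index step above maps the flat index
    # of the best assignment fidelity back onto (frequency, amplitude) indices,
    # e.g. for a 3x2 grid np.unravel_index(4, (3, 2)) == (2, 0), i.e. the third
    # sweep frequency and the first amplitude (shapes here are examples only).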
def calibrate_ssro_pulse_duration(self, MC=None,
nested_MC=None,
amps=None,
amp_lim=None,
times= None,
use_adaptive: bool = True,
n_points: int = 80,
analyze: bool = True,
update: bool = True):
'''
Calibrates the RO pulse duration by measuring the assignment fidelity of
SSRO experiments as a function of the RO pulse duration and amplitude.
For each set of parameters, the routine calibrates optimal weights and
then extracts readout fidelity.
This measurement can be performed using an adaptive sampler
(use_adaptive=True) or a regular 2D parameter sweep (use_adaptive=False).
Designed to be used in the GBT node 'SSRO Pulse Duration'.
Args:
            amps (array):
                Set of RO amplitudes sampled in the 2D sweep
                (only used when use_adaptive=False).
            amp_lim (tuple):
                Minimum and maximum of the RO amplitude range used as bounds
                by the adaptive sampler (only used when use_adaptive=True).
            times (array):
                Set of RO pulse durations sampled in the 2D sweep
                (only used when use_adaptive=False); in adaptive mode the
                pulse-duration bounds are fixed to (10 ns, 400 ns).
use_adaptive (bool):
Boolean that sets the sampling mode. Set to "False" for a
regular 2D sweep or set to "True" for adaptive sampling.
n_points:
Only relevant in the adaptive sampling mode. Sets the maximum
number of points sampled.
'''
if MC is None:
MC = self.instr_MC.get_instr()
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if times is None:
times = np.arange(10e-9, 401e-9, 10e-9)
if amps is None:
amps = np.linspace(.01,.25,11)
if amp_lim is None:
amp_lim = (0.01, 0.2)
######################
# Experiment
######################
nested_MC.set_sweep_functions([self.ro_pulse_length,
self.ro_pulse_amp])
d = det.Function_Detector(self.calibrate_optimal_weights,
result_keys=['F_a','F_d','SNR'],
value_names=['F_a','F_d','SNR'],
value_units=['a.u.','a.u.','a.u.'])
nested_MC.set_detector_function(d)
# Use adaptive sampling
if use_adaptive is True:
# Adaptive sampler cost function
loss_per_simplex = mk_minimization_loss_func()
goal = mk_minimization_goal_func()
nested_MC.set_adaptive_function_parameters(
{'adaptive_function': LearnerND_Minimizer,
'goal': lambda l: goal(l) or l.npoints > n_points,
'loss_per_simplex': loss_per_simplex,
'bounds': [(10e-9, 400e-9), amp_lim],
'minimize': False
})
nested_MC.run(name='RO_duration_tuneup_{}'.format(self.name),
mode='adaptive')
# Use standard 2D sweep
else:
nested_MC.set_sweep_points(times)
nested_MC.set_sweep_points_2D(amps)
nested_MC.run(name='RO_duration_tuneup_{}'.format(self.name),
mode='2D')
#####################
# Analysis
#####################
if analyze is True:
if use_adaptive is True:
A = ma2.Readout_landspace_Analysis(label='RO_duration_tuneup')
optimal_pulse_duration = A.qoi['Optimal_parameter_X']
optimal_pulse_amplitude = A.qoi['Optimal_parameter_Y']
self.ro_pulse_length(optimal_pulse_duration)
self.ro_pulse_amp(optimal_pulse_amplitude)
else:
A = ma.TwoD_Analysis(label='RO_duration_tuneup', auto=True)
return True
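    # Example usage (illustrative sketch; values are placeholders):
    #   q0.calibrate_ssro_pulse_duration(use_adaptive=True, n_points=80,
    #                                    amp_lim=(0.01, 0.2))
    #   q0.calibrate_ssro_pulse_duration(use_adaptive=False,
    #                                    times=np.arange(100e-9, 401e-9, 50e-9),
    #                                    amps=np.linspace(0.01, 0.25, 6))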
def calibrate_ssro_fine(self, MC=None,
nested_MC=None,
start_freq=None,
start_amp=None,
start_freq_step=None,
start_amp_step=None,
threshold: float = .99,
analyze: bool = True,
update: bool = True):
'''
Runs an optimizer routine on the SSRO assignment fidelity of the
<qubit>.ro_freq and <qubit>.ro_pulse_amp parameters.
Intended to be used in the "SSRO Optimization" node of GBT.
Args:
start_freq (float):
                Starting frequency of the optimizer.
start_amp (float):
Starting amplitude of the optimizer.
start_freq_step (float):
                Starting frequency step of the optimizer.
start_amp_step (float):
Starting amplitude step of the optimizer.
threshold (float):
                Fidelity threshold after which the optimizer stops iterating.
'''
if MC is None:
MC = self.instr_MC.get_instr()
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if start_freq_step is None:
if start_freq is None:
start_freq = self.ro_freq()
start_freq_step = 0.1e6
else:
                raise ValueError('Must provide start frequency step if start '
                                 'frequency is specified.')
if start_amp_step is None:
if start_amp is None:
start_amp = self.ro_pulse_amp()
start_amp_step = 0.01
else:
                raise ValueError('Must provide start amplitude step if start '
                                 'amplitude is specified.')
        if start_freq is None:
            start_freq = self.ro_freq()
        if start_amp is None:
            start_amp = self.ro_pulse_amp()
nested_MC.set_sweep_functions([self.ro_freq, self.ro_pulse_amp])
d = det.Function_Detector(self.calibrate_optimal_weights,
result_keys=['F_a'],
value_names=['F_a'],
value_units=['a.u.'])
nested_MC.set_detector_function(d)
ad_func_pars = {'adaptive_function': nelder_mead,
                        'x0': [start_freq, start_amp],
'initial_step': [start_freq_step, start_amp_step],
'no_improv_break': 10,
'minimize': False,
'maxiter': 20,
'f_termination': threshold}
nested_MC.set_adaptive_function_parameters(ad_func_pars)
nested_MC.set_optimization_method('nelder_mead')
nested_MC.run(name='RO_fine_tuneup', mode='adaptive')
if analyze is True:
ma.OptimizationAnalysis(label='RO_fine_tuneup')
return True
def calibrate_ro_acq_delay(self, MC=None,
analyze: bool = True,
prepare: bool = True,
disable_metadata: bool = False):
"""
Calibrates the ro_acq_delay parameter for the readout.
For that it analyzes the transients.
"""
self.ro_acq_delay(0) # set delay to zero
old_pow = self.ro_pulse_amp()
self.ro_pulse_amp(0.5)
if MC is None:
MC = self.instr_MC.get_instr()
# if plot_max_time is None:
# plot_max_time = self.ro_acq_integration_length()+250e-9
if prepare:
self.prepare_for_timedomain()
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb='off',
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
else:
p = None # object needs to exist for the openql_sweep to work
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Transient time', unit='s',
upload=prepare)
MC.set_sweep_function(s)
if 'UHFQC' in self.instr_acquisition():
sampling_rate = 1.8e9
else:
raise NotImplementedError()
MC.set_sweep_points(np.arange(self.input_average_detector.nr_samples) /
sampling_rate)
MC.set_detector_function(self.input_average_detector)
MC.run(name='Measure_Acq_Delay_{}'.format(self.msmt_suffix),
disable_snapshot_metadata=disable_metadata)
self.ro_pulse_amp(old_pow)
if analyze:
a = ma2.RO_acquisition_delayAnalysis(qubit_name=self.name)
# Delay time is averaged over the two quadratures.
delay_time = (a.proc_data_dict['I_pulse_start'] +
a.proc_data_dict['Q_pulse_start'])/2
self.ro_acq_delay(delay_time)
return True
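    # Illustrative note: the sweep points above form the time axis of the
    # recorded transient, nr_samples / sampling_rate. For the UHFQC at
    # 1.8 GSa/s, e.g. 4096 samples correspond to 4096/1.8e9 ~ 2.3 us
    # (the sample count is set by the input-average detector, not by this
    # example).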
def calibrate_mw_gates_restless(
self, MC=None,
parameter_list: list = ['G_amp', 'D_amp', 'freq'],
initial_values: list = None,
initial_steps: list = [0.05, 0.05, 1e6],
nr_cliffords: int = 80, nr_seeds: int = 200,
verbose: bool = True, update: bool = True,
prepare_for_timedomain: bool = True):
"""
Refs:
Rol PR Applied 7, 041001 (2017)
"""
return self.calibrate_mw_gates_rb(
MC=None,
parameter_list=parameter_list,
initial_values=initial_values,
initial_steps=initial_steps,
nr_cliffords=nr_cliffords, nr_seeds=nr_seeds,
verbose=verbose, update=update,
prepare_for_timedomain=prepare_for_timedomain,
method='restless')
def calibrate_mw_gates_rb(
self, MC=None,
parameter_list: list = ['G_amp', 'D_amp', 'freq'],
initial_values: list = None,
initial_steps: list = [0.05, 0.05, 1e6],
nr_cliffords: int = 80, nr_seeds: int = 200,
verbose: bool = True, update: bool = True,
prepare_for_timedomain: bool = True,
            method: str = None,
optimizer: str = 'NM'):
"""
Calibrates microwave pulses using a randomized benchmarking based
cost-function.
requirements for restless:
- Digitized readout (calibrated)
requirements for ORBIT:
            - Optimal weights such that minimizing corresponds to the 0 state.
"""
if method is None:
method = self.cfg_rb_calibrate_method()
if method == 'restless':
restless = True
else: # ORBIT
restless = False
if MC is None:
MC = self.instr_MC.get_instr()
if initial_steps is None:
initial_steps: list = [0.05, 0.05, 1e6]
if prepare_for_timedomain:
self.prepare_for_timedomain()
if parameter_list is None:
# parameter_list = ['G_amp', 'D_amp']
parameter_list = ['G_amp', 'D_amp','freq']
mw_lutman = self.instr_LutMan_MW.get_instr()
G_amp_par = wrap_par_to_swf(
mw_lutman.parameters['channel_amp'],
retrieve_value=True)
D_amp_par = swf.QWG_lutman_par(LutMan=mw_lutman,
LutMan_parameter=mw_lutman.mw_motzoi)
freq_par = self.instr_LO_mw.get_instr().frequency
sweep_pars = []
for par in parameter_list:
if par == 'G_amp':
sweep_pars.append(G_amp_par)
elif par == 'D_amp':
sweep_pars.append(D_amp_par)
elif par == 'freq':
sweep_pars.append(freq_par)
else:
raise NotImplementedError(
"Parameter {} not recognized".format(par))
if initial_values is None:
# use the current values of the parameters being varied.
            # NB: assumes the default parameter_list order ['G_amp', 'D_amp', 'freq']
            initial_values = [G_amp_par.get(), mw_lutman.mw_motzoi.get(), freq_par.get()]
# Preparing the sequence
if restless:
net_clifford = 3 # flipping sequence
d = det.UHFQC_single_qubit_statistics_logging_det(
self.instr_acquisition.get_instr(),
self.instr_CC.get_instr(), nr_shots=4*4095,
integration_length=self.ro_acq_integration_length(),
channel=self.ro_acq_weight_chI(),
statemap={'0': '1', '1': '0'})
minimize = False
msmt_string = 'Restless_tuneup_{}Cl_{}seeds'.format(
nr_cliffords, nr_seeds) + self.msmt_suffix
else:
net_clifford = 0 # not flipping sequence
d = self.int_avg_det_single
minimize = True
msmt_string = 'ORBIT_tuneup_{}Cl_{}seeds'.format(
nr_cliffords, nr_seeds) + self.msmt_suffix
p = sqo.randomized_benchmarking(
self.cfg_qubit_nr(), self.cfg_openql_platform_fn(),
nr_cliffords=[nr_cliffords],
net_clifford=net_clifford, nr_seeds=nr_seeds,
restless=restless, cal_points=False)
self.instr_CC.get_instr().eqasm_program(p.filename)
self.instr_CC.get_instr().start()
MC.set_sweep_functions(sweep_pars)
MC.set_detector_function(d)
if optimizer == 'CMA':
ad_func_pars = {'adaptive_function': cma.fmin,
'x0': initial_values,
'sigma0': 1,
# 'noise_handler': cma.NoiseHandler(len(initial_values)),
'minimize': minimize,
'options': {'cma_stds': initial_steps}}
elif optimizer == 'NM':
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': initial_values,
'initial_step': initial_steps,
'no_improv_break': 50,
'minimize': minimize,
'maxiter': 1500}
MC.set_adaptive_function_parameters(ad_func_pars)
MC.run(name=msmt_string,
mode='adaptive')
a = ma.OptimizationAnalysis(label=msmt_string)
if update:
if verbose:
print("Updating parameters in qubit object")
opt_par_values = a.optimization_result[0]
for par in parameter_list:
if par == 'G_amp':
G_idx = parameter_list.index('G_amp')
self.mw_channel_amp(opt_par_values[G_idx])
elif par == 'D_amp':
D_idx = parameter_list.index('D_amp')
self.mw_vsm_D_amp(opt_par_values[D_idx])
elif par == 'D_phase':
D_idx = parameter_list.index('D_phase')
self.mw_vsm_D_phase(opt_par_values[D_idx])
elif par == 'freq':
freq_idx = parameter_list.index('freq')
# We are varying the LO frequency in the opt, not the q freq.
self.freq_qubit(opt_par_values[freq_idx] +
self.mw_freq_mod.get())
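        # Illustrative note: the optimizer varies the LO frequency, so the qubit
        # frequency is recovered as f_qubit = f_LO + mw_freq_mod (as in the line
        # above), e.g. f_LO = 5.90 GHz and mw_freq_mod = 100 MHz give
        # f_qubit = 6.00 GHz (example numbers only).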
def calibrate_mw_gates_allxy(self, nested_MC=None,
start_values=None,
initial_steps=None,
parameter_list=None,
termination_opt=0.01):
# FIXME: this tuneup does not update the qubit object parameters
        # update: Fixed on the pagani set-up
# FIXME2: this tuneup does not return True upon success
# update: Fixed on the pagani set-up
if initial_steps is None:
if parameter_list is None:
initial_steps = [1e6, 0.05, 0.05]
else:
raise ValueError(
"must pass initial steps if setting parameter_list")
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if parameter_list is None:
if self.cfg_with_vsm():
parameter_list = ["freq_qubit",
"mw_vsm_G_amp",
"mw_vsm_D_amp"]
else:
parameter_list = ["freq_qubit",
"mw_channel_amp",
"mw_motzoi"]
nested_MC.set_sweep_functions([
self.__getattr__(p) for p in parameter_list])
if start_values is None:
# use current values
start_values = [self.get(p) for p in parameter_list]
d = det.Function_Detector(self.measure_allxy,
value_names=['AllXY cost'],
value_units=['a.u.'],)
nested_MC.set_detector_function(d)
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': start_values,
'initial_step': initial_steps,
'no_improv_break': 10,
'minimize': True,
'maxiter': 500,
'f_termination': termination_opt}
nested_MC.set_adaptive_function_parameters(ad_func_pars)
nested_MC.set_optimization_method('nelder_mead')
nested_MC.run(name='gate_tuneup_allxy', mode='adaptive')
a2 = ma.OptimizationAnalysis(label='gate_tuneup_allxy')
if a2.optimization_result[1][0] > termination_opt:
return False
else:
return True
def calibrate_mw_gates_allxy2(self, nested_MC=None,
start_values=None,
initial_steps=None, f_termination=0.01):
'''
FIXME! Merge both calibrate allxy methods.
        Optimizes the AllXY sequence by tuning 2 parameters:
mw_channel_amp and mw_motzoi.
Used for Graph based tune-up in the ALLXY node.
'''
old_avg = self.ro_acq_averages()
self.ro_acq_averages(2**14)
VSM = self.instr_VSM.get_instr()
# Close all vsm channels
modules = range(8)
for module in modules:
VSM.set('mod{}_marker_source'.format(module+1), 'int')
for channel in [1, 2, 3, 4]:
VSM.set('mod{}_ch{}_marker_state'.format(
module+1, channel), 'off')
# Open intended channel
VSM.set('mod{}_marker_source'.format(self.mw_vsm_mod_out()), 'int')
VSM.set('mod{}_ch{}_marker_state'.format(
self.mw_vsm_mod_out(), self.mw_vsm_ch_in()), 'on')
if initial_steps is None:
initial_steps = [0.05, 0.05]
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if self.cfg_with_vsm():
parameter_list = ["mw_vsm_G_amp",
"mw_vsm_D_amp"]
else:
parameter_list = ["mw_channel_amp",
"mw_motzoi"]
nested_MC.set_sweep_functions([
self.__getattr__(p) for p in parameter_list])
if start_values is None:
# use current values
start_values = [self.get(p) for p in parameter_list]
d = det.Function_Detector(self.measure_allxy,
value_names=['AllXY cost'],
value_units=['a.u.'],)
nested_MC.set_detector_function(d)
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': start_values,
'initial_step': initial_steps,
'no_improv_break': 10,
'minimize': True,
'maxiter': 500,
'f_termination': f_termination}
nested_MC.set_adaptive_function_parameters(ad_func_pars)
nested_MC.set_optimization_method('nelder_mead')
nested_MC.run(name='gate_tuneup_allxy', mode='adaptive')
a2 = ma.OptimizationAnalysis(label='gate_tuneup_allxy')
self.ro_acq_averages(old_avg)
# Open all vsm channels
for module in modules:
VSM.set('mod{}_marker_source'.format(module+1), 'int')
for channel in [1, 2, 3, 4]:
VSM.set('mod{}_ch{}_marker_state'.format(
module+1, channel), 'on')
if a2.optimization_result[1][0] > f_termination:
return False
else:
return True
def calibrate_RO(self, nested_MC=None,
start_params=None,
initial_step=None,
threshold=0.05):
'''
Optimizes the RO assignment fidelity using 2 parameters:
ro_freq and ro_pulse_amp.
Args:
start_params: Starting parameters for <qubit>.ro_freq and
<qubit>.ro_pulse_amp. These have to be passed on in
the aforementioned order, that is:
[ro_freq, ro_pulse_amp].
            initial_step: These have to be given in the order:
[ro_freq, ro_pulse_amp]
threshold: Assignment fidelity error (1-F_a) threshold used in
the optimization.
Used for Graph based tune-up.
'''
# FIXME: Crashes whenever it tries to set the pulse amplitude higher
# than 1.
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if start_params is None:
start_params = [self.ro_freq(), self.ro_pulse_amp()]
if initial_step is None:
initial_step = [1.e6, .05]
nested_MC.set_sweep_functions([self.ro_freq, self.ro_pulse_amp])
def wrap_func():
error = 1 - self.calibrate_optimal_weights()['F_a']
return error
d = det.Function_Detector(wrap_func,
value_names=['F_a error'],
value_units=['a.u.'])
nested_MC.set_detector_function(d)
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': start_params,
'initial_step': initial_step,
'no_improv_break': 10,
'minimize': True,
'maxiter': 20,
'f_termination': threshold}
nested_MC.set_adaptive_function_parameters(ad_func_pars)
nested_MC.set_optimization_method('nelder_mead')
nested_MC.run(name='RO_tuneup', mode='adaptive')
a = ma.OptimizationAnalysis(label='RO_tuneup')
        if a.optimization_result[1][0] > threshold:  # default 0.05, i.e. fidelity 0.95
return False
else:
return True
def calibrate_depletion_pulse(
self, nested_MC=None, amp0=None,
amp1=None, phi0=180, phi1=0, initial_steps=None, two_par=True,
depletion_optimization_window=None, depletion_analysis_plot=False,
use_RTE_cost_function=False):
"""
        This function automatically tunes up a two-step, four-parameter
        depletion pulse.
        It uses the averaged transients for ground and excited state for its
        cost function.
        Refs:
            Bultink PR Applied 6, 034008 (2016)
        Args:
            two_par: if readout is performed at the symmetry point and in the
                linear regime, two parameters suffice. Otherwise, four
                parameters do not converge.
                First optimizing the amplitudes (two parameters) and
                then running the full four parameters with the correct initial
                amplitudes works.
            optimization_window: the optimization window determines which part
                of the transients will be
                nulled in the optimization. By default it uses a
                window of 500 ns post depletion with a 50 ns buffer.
            initial_steps: These have to be given in the order
                [phi0, phi1, amp0, amp1] for 4-par tuning and
                [amp0, amp1] for 2-par tuning
"""
# FIXME: this calibration does not update the qubit object params
# FIXME2: this calibration does not return a boolean upon success
# tuneup requires nested MC as the transients detector will use MC
self.ro_pulse_type('up_down_down')
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
# setting the initial depletion amplitudes
if amp0 is None:
amp0 = 2*self.ro_pulse_amp()
if amp1 is None:
amp1 = 0.5*self.ro_pulse_amp()
if depletion_optimization_window is None:
depletion_optimization_window = [
self.ro_pulse_length()+self.ro_pulse_down_length0()
+ self.ro_pulse_down_length1()+50e-9,
self.ro_pulse_length()+self.ro_pulse_down_length0()
+ self.ro_pulse_down_length1()+550e-9]
if two_par:
nested_MC.set_sweep_functions([
self.ro_pulse_down_amp0,
self.ro_pulse_down_amp1])
else:
nested_MC.set_sweep_functions([self.ro_pulse_down_phi0,
self.ro_pulse_down_phi1,
self.ro_pulse_down_amp0,
self.ro_pulse_down_amp1])
if use_RTE_cost_function:
d = det.Function_Detector(self.measure_error_fraction,
msmt_kw={'net_gate': 'pi',
'feedback': False,
'sequence_type': 'echo'},
value_names=['error fraction'],
value_units=['au'],
result_keys=['error fraction'])
else:
d = det.Function_Detector(self.measure_transients,
msmt_kw={'depletion_analysis': True,
'depletion_analysis_plot':
depletion_analysis_plot,
'depletion_optimization_window':
depletion_optimization_window},
value_names=['depletion cost'],
value_units=['au'],
result_keys=['depletion_cost'])
nested_MC.set_detector_function(d)
if two_par:
if initial_steps is None:
initial_steps = [-0.5*amp0, -0.5*amp1]
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': [amp0, amp1],
'initial_step': initial_steps,
'no_improv_break': 12,
'minimize': True,
'maxiter': 500}
self.ro_pulse_down_phi0(180)
self.ro_pulse_down_phi1(0)
else:
if initial_steps is None:
initial_steps = [15, 15, -0.1*amp0, -0.1*amp1]
ad_func_pars = {'adaptive_function': nelder_mead,
'x0': [phi0, phi1, amp0, amp1],
'initial_step': initial_steps,
'no_improv_break': 12,
'minimize': True,
'maxiter': 500}
nested_MC.set_adaptive_function_parameters(ad_func_pars)
nested_MC.set_optimization_method('nelder_mead')
nested_MC.run(name='depletion_tuneup', mode='adaptive')
ma.OptimizationAnalysis(label='depletion_tuneup')
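    # Illustrative note: with the defaults above the optimization window opens
    # 50 ns after the full up-down-down pulse and is 500 ns long, e.g. a 400 ns
    # readout pulse followed by two 200 ns depletion segments gives
    #   start = 400e-9 + 200e-9 + 200e-9 + 50e-9  = 850 ns
    #   stop  = 400e-9 + 200e-9 + 200e-9 + 550e-9 = 1.35 us
    # (pulse durations are example values only).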
def calibrate_ef_rabi(self,
amps: list = np.linspace(-.8, .8, 18),
recovery_pulse: bool = True,
MC=None, label: str = '',
analyze=True, close_fig=True,
prepare_for_timedomain=True, update=True):
"""
Calibrates the pi pulse of the ef/12 transition using
a rabi oscillation of the ef/12 transition.
Modulation frequency of the "ef" pulses is controlled through the
`anharmonicity` parameter of the qubit object.
Hint: the expected pi-pulse amplitude of the ef/12 transition is ~1/2
the pi-pulse amplitude of the ge/01 transition.
"""
        a2 = self.measure_ef_rabi(amps=amps,
                                  recovery_pulse=recovery_pulse,
                                  MC=MC, label=label,
                                  analyze=analyze, close_fig=close_fig,
                                  prepare_for_timedomain=prepare_for_timedomain)
if update:
ef_pi_amp = a2.proc_data_dict['ef_pi_amp']
            self.mw_ef_amp(ef_pi_amp)
##########################################################################
# calibrate_ functions (overrides for class Qubit)
##########################################################################
def calibrate_motzoi(self, MC=None, verbose=True, update=True, motzois=None):
"""
        Calibrates the DRAG coefficient value, named motzoi (after <NAME>)
for legacy reasons.
For details see docstring of measure_motzoi method.
"""
using_VSM = self.cfg_with_vsm()
if using_VSM and motzois is None:
motzois = gen_sweep_pts(start=0.1, stop=1.0, num=31)
elif motzois is None:
motzois = gen_sweep_pts(center=0, span=.3, num=31)
# large range
a = self.measure_motzoi(MC=MC, motzoi_amps=motzois, analyze=True)
opt_motzoi = a.get_intersect()[0]
if opt_motzoi > max(motzois) or opt_motzoi < min(motzois):
if verbose:
print('optimal motzoi {:.3f} '.format(opt_motzoi) +
'outside of measured span, aborting')
return False
if update:
if using_VSM:
if verbose:
print('Setting motzoi to {:.3f}'.format(opt_motzoi))
self.mw_vsm_D_amp(opt_motzoi)
else:
self.mw_motzoi(opt_motzoi)
return opt_motzoi
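    # Example usage (illustrative sketch, assuming a qubit object `q0`):
    #   q0.calibrate_motzoi()                                      # default span
    #   q0.calibrate_motzoi(motzois=np.linspace(-0.25, 0.25, 31))  # explicit sweep
    # Returns the optimal motzoi coefficient, or False when the optimum falls
    # outside the measured span.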
def calibrate_mixer_offsets_drive(self, mixer_channels=['G', 'D'],
update: bool = True, ftarget=-110,
maxiter=300)-> bool:
"""
Calibrates the mixer offset and updates the I and Q offsets in
the qubit object.
Args:
mixer_channels (list):
                Not used in the no-VSM case.
                With a VSM, specifies whether to calibrate offsets for the
                gaussian 'G' and/or derivative 'D' channel.
update (bool):
should optimal values be set in the qubit object
ftarget (float): power of the signal at the LO frequency
for which the optimization is terminated
"""
# turn relevant channels on
using_VSM = self.cfg_with_vsm()
MW_LutMan = self.instr_LutMan_MW.get_instr()
AWG = MW_LutMan.AWG.get_instr()
if using_VSM:
if AWG.__class__.__name__ == 'QuTech_AWG_Module':
chGI_par = AWG.parameters['ch1_offset']
chGQ_par = AWG.parameters['ch2_offset']
chDI_par = AWG.parameters['ch3_offset']
chDQ_par = AWG.parameters['ch4_offset']
else:
                # This part is AWG8 specific and won't work with a QWG
awg_ch = self.mw_awg_ch()
AWG.stop()
AWG.set('sigouts_{}_on'.format(awg_ch-1), 1)
AWG.set('sigouts_{}_on'.format(awg_ch+0), 1)
AWG.set('sigouts_{}_on'.format(awg_ch+1), 1)
AWG.set('sigouts_{}_on'.format(awg_ch+2), 1)
chGI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch-1)]
chGQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+0)]
chDI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+1)]
chDQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+2)]
# End of AWG8 specific part
VSM = self.instr_VSM.get_instr()
ch_in = self.mw_vsm_ch_in()
            # module 8 is hardcoded for mixer calibrations (signal hound)
VSM.set('mod8_marker_source'.format(ch_in), 'int')
VSM.set('mod8_ch{}_marker_state'.format(ch_in), 'on')
# Calibrate Gaussian component mixer
if 'G' in mixer_channels:
VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 1.0)
VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 0.1)
offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation(
SH=self.instr_SH.get_instr(),
source=self.instr_LO_mw.get_instr(),
MC=self.instr_MC.get_instr(),
chI_par=chGI_par, chQ_par=chGQ_par,
label='Mixer_offsets_drive_G'+self.msmt_suffix,
ftarget=ftarget, maxiter=maxiter)
if update:
self.mw_mixer_offs_GI(offset_I)
self.mw_mixer_offs_GQ(offset_Q)
if 'D' in mixer_channels:
# Calibrate Derivative component mixer
VSM.set('mod8_ch{}_gaussian_amp'.format(ch_in), 0.1)
VSM.set('mod8_ch{}_derivative_amp'.format(ch_in), 1.0)
offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation(
SH=self.instr_SH.get_instr(),
source=self.instr_LO_mw.get_instr(),
MC=self.instr_MC.get_instr(),
chI_par=chDI_par,
chQ_par=chDQ_par,
label='Mixer_offsets_drive_D'+self.msmt_suffix,
ftarget=ftarget, maxiter=maxiter)
if update:
self.mw_mixer_offs_DI(offset_I)
self.mw_mixer_offs_DQ(offset_Q)
else:
if self._using_QWG():
QWG_MW = self.instr_LutMan_MW.get_instr().AWG.get_instr()
chI = self.instr_LutMan_MW.get_instr().channel_I()
chQ = self.instr_LutMan_MW.get_instr().channel_Q()
chI_par = QWG_MW.parameters['ch%s_offset' % chI]
chQ_par = QWG_MW.parameters['ch%s_offset' % chQ]
offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation(
SH=self.instr_SH.get_instr(),
source=self.instr_LO_mw.get_instr(),
MC=self.instr_MC.get_instr(),
chI_par=chI_par,
chQ_par=chQ_par,
ftarget=ftarget, maxiter=maxiter)
if update:
self.mw_mixer_offs_GI(offset_I)
self.mw_mixer_offs_GQ(offset_Q)
else:
awg_ch = self.mw_awg_ch()
AWG.stop()
AWG.set('sigouts_{}_on'.format(awg_ch-1), 1)
AWG.set('sigouts_{}_on'.format(awg_ch+0), 1)
chGI_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch-1)]
chGQ_par = AWG.parameters['sigouts_{}_offset'.format(awg_ch+0)]
offset_I, offset_Q = cal_toolbox.mixer_carrier_cancellation(
SH=self.instr_SH.get_instr(),
source=self.instr_LO_mw.get_instr(),
MC=self.instr_MC.get_instr(),
chI_par=chGI_par, chQ_par=chGQ_par,
label='Mixer_offsets_drive'+self.msmt_suffix,
ftarget=ftarget, maxiter=maxiter)
if update:
self.mw_mixer_offs_GI(offset_I)
self.mw_mixer_offs_GQ(offset_Q)
return True
def calibrate_optimal_weights(self, MC=None, verify: bool = True,
analyze: bool = True, update: bool = True,
no_figs: bool = False,
optimal_IQ: bool = False,
measure_transients_CCL_switched: bool = False,
prepare: bool = True,
disable_metadata: bool = False,
nr_shots_per_case: int = 2**13,
post_select: bool = False,
averages: int = 2**15,
post_select_threshold: float = None,
)->bool:
"""
Measures readout transients for the qubit in ground and excited state to indicate
        at what times the transients differ. Based on the transients, it calculates
        weights that are used to weight measurement traces to maximize the SNR.
Args:
optimal_IQ (bool):
if set to True sets both the I and Q weights of the optimal
weight functions for the verification experiment.
A good sanity check is that when using optimal IQ one expects
to see no signal in the Q quadrature of the verification
SSRO experiment.
verify (bool):
indicates whether to run measure_ssro at the end of the routine
to find the new SNR and readout fidelities with optimized weights
update (bool):
specifies whether to update the weights in the qubit object
"""
log.info('Calibrating optimal weights for {}'.format(self.name))
if MC is None:
MC = self.instr_MC.get_instr()
if prepare:
self.prepare_for_timedomain()
# Ensure that enough averages are used to get accurate weights
old_avg = self.ro_acq_averages()
self.ro_acq_averages(averages)
if measure_transients_CCL_switched:
transients = self.measure_transients_CCL_switched(MC=MC,
analyze=analyze,
depletion_analysis=False)
else:
transients = self.measure_transients(MC=MC, analyze=analyze,
depletion_analysis=False,
disable_metadata=disable_metadata)
if analyze:
ma.Input_average_analysis(IF=self.ro_freq_mod())
self.ro_acq_averages(old_avg)
# deskewing the input signal
# Calculate optimal weights
optimized_weights_I = (transients[1][0] - transients[0][0])
optimized_weights_Q = (transients[1][1] - transients[0][1])
# joint rescaling to +/-1 Volt
maxI = np.max(np.abs(optimized_weights_I))
maxQ = np.max(np.abs(optimized_weights_Q))
        # fixme: dividing the weight functions by four to not have overflow in
# thresholding of the UHFQC
weight_scale_factor = 1./(4*np.max([maxI, maxQ]))
optimized_weights_I = np.array(
weight_scale_factor*optimized_weights_I)
optimized_weights_Q = np.array(
weight_scale_factor*optimized_weights_Q)
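        # Illustrative note: the weights above are the difference of the averaged
        # excited- and ground-state transients per quadrature,
        #   w_I(t) = <s_I(t)>_e - <s_I(t)>_g,   w_Q(t) = <s_Q(t)>_e - <s_Q(t)>_g,
        # jointly rescaled by 1/(4*max(|w_I|, |w_Q|)) so the larger quadrature
        # peaks at 0.25, avoiding overflow in the UHFQC thresholding (see the
        # fixme above).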
if update:
self.ro_acq_weight_func_I(optimized_weights_I)
self.ro_acq_weight_func_Q(optimized_weights_Q)
if optimal_IQ:
self.ro_acq_weight_type('optimal IQ')
else:
self.ro_acq_weight_type('optimal')
if verify:
self._prep_ro_integration_weights()
self._prep_ro_instantiate_detectors()
ssro_dict = self.measure_ssro(
no_figs=no_figs, update=update,
prepare=True, disable_metadata=disable_metadata,
nr_shots_per_case=nr_shots_per_case,
post_select=post_select,
post_select_threshold=post_select_threshold)
return ssro_dict
if verify:
warnings.warn('Not verifying as settings were not updated.')
return True
##########################################################################
# measure_ functions (overrides for class Qubit)
    # NB: functions closely related to overrides are also included here
##########################################################################
def measure_heterodyne_spectroscopy(self, freqs, MC=None,
analyze=True, close_fig=True,
label=''):
"""
Measures a transmission through the feedline as a function of frequency.
Usually used to find and characterize the resonators in routines such as
find_resonators or find_resonator_frequency.
Args:
freqs (array):
list of frequencies to sweep over
analyze (bool):
indicates whether to perform a hanger model
fit to the data
label (str):
suffix to append to the measurement label
"""
UHFQC = self.instr_acquisition.get_instr()
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Starting specmode if set in config
if self.cfg_spec_mode():
UHFQC.spec_mode_on(acq_length=self.ro_acq_integration_length(),
IF=self.ro_freq_mod(),
ro_amp=self.ro_pulse_amp_CW())
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
CCL.stop()
p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple(
MW_LO_source=self.instr_LO_ro.get_instr(),
IF=self.ro_freq_mod()))
MC.set_sweep_points(freqs)
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Resonator_scan'+self.msmt_suffix+label)
# Stopping specmode
if self.cfg_spec_mode():
UHFQC.spec_mode_off()
self._prep_ro_pulse(upload=True)
if analyze:
ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig)
def measure_resonator_power(self, freqs, powers, MC=None,
analyze: bool = True, close_fig: bool = True,
label: str = ''):
"""
        Measures the readout resonator with the UHFQC as a function of the pulse power.
The pulse power is controlled by changing the amplitude of the UHFQC-generated
waveform.
Args:
freqs (array):
                list of frequencies to sweep over
powers (array):
powers of the readout pulse to sweep over. The power is adjusted
by changing the amplitude of the UHFQC output channels. Thereby
the range of powers is limited by the dynamic range of mixers.
"""
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
CCL.stop()
p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple(
MW_LO_source=self.instr_LO_ro.get_instr(),
IF=self.ro_freq_mod()))
MC.set_sweep_points(freqs)
ro_lm = self.instr_LutMan_RO.get_instr()
m_amp_par = ro_lm.parameters[
'M_amp_R{}'.format(self.cfg_qubit_nr())]
s2 = swf.lutman_par_dB_attenuation_UHFQC_dig_trig(
LutMan=ro_lm, LutMan_parameter=m_amp_par)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points_2D(powers)
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Resonator_power_scan'+self.msmt_suffix+label, mode='2D')
if analyze:
ma.TwoD_Analysis(label='Resonator_power_scan',
close_fig=close_fig, normalize=True)
def measure_ssro(self, MC=None,
nr_shots_per_case: int = 2**13, # 8192
cases=('off', 'on'),
prepare: bool = True, no_figs: bool = False,
post_select: bool = False,
post_select_threshold: float = None,
nr_flux_dance:float=None,
wait_time:float=None,
update: bool = True,
SNR_detector: bool = False,
shots_per_meas: int = 2**16,
vary_residual_excitation: bool = True,
disable_metadata: bool = False, label: str = ''):
"""
Performs a number of single shot measurements with qubit in ground and excited state
to extract the SNR and readout fidelities.
Args:
analyze (bool):
should the analysis be executed
nr_shots_per_case (int):
total number of measurements in qubit ground and excited state
cases:
currently unused
            update (bool):
                indicates whether to update the qubit parameters, including
                the threshold according to which a shot is classified as
                ground or excited state.
prepare (bool):
should the prepare_for_timedomain be executed?
SNR_detector (bool):
                the function will return a dictionary of results, making this
                function easier to use as a detector in a nested measurement
shots_per_meas (int):
number of single shot measurements per single
acquisition with UHFQC
vary_residual_excitation (bool):
if False, uses the last known values of residual excitation
and measurement induced relaxation and keeps these fixed.
...
"""
# off and on, not including post selection init measurements yet
nr_shots = nr_shots_per_case*2
old_RO_digit = self.ro_acq_digitized()
self.ro_acq_digitized(False)
if MC is None:
MC = self.instr_MC.get_instr()
# plotting really slows down SSRO (16k shots plotting is slow)
old_plot_setting = MC.live_plot_enabled()
MC.live_plot_enabled(False)
if prepare:
self.prepare_for_timedomain()
# This snippet causes 0.08 s of overhead but is dangerous to bypass
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb='off_on',
nr_flux_dance=nr_flux_dance,
wait_time=wait_time,
initialize=post_select,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
# digitization setting is reset here but the detector still uses
# the disabled setting that was set above
self.ro_acq_digitized(old_RO_digit)
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Shot', unit='#',
upload=prepare)
MC.soft_avg(1) # don't want to average single shots
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_shots))
d = self.int_log_det
d.nr_shots = np.min([shots_per_meas, nr_shots])
MC.set_detector_function(d)
MC.run('SSRO_{}{}'.format(label, self.msmt_suffix),
disable_snapshot_metadata=disable_metadata)
MC.live_plot_enabled(old_plot_setting)
######################################################################
# SSRO Analysis
######################################################################
        if post_select_threshold is None:
post_select_threshold = self.ro_acq_threshold()
options_dict = {'post_select': post_select,
'nr_samples': 2+2*post_select,
'post_select_threshold': post_select_threshold,
'predict_qubit_temp': True,
'qubit_freq': self.freq_qubit()}
if not vary_residual_excitation:
options_dict.update(
{'fixed_p10': self.res_exc,
'fixed_p01': self.mmt_rel})
a = ma2.ra.Singleshot_Readout_Analysis(
options_dict=options_dict,
extract_only=no_figs)
######################################################################
# Update parameters in the qubit object based on the analysis
######################################################################
if update:
self.res_exc = a.proc_data_dict['quantities_of_interest']['residual_excitation']
self.mmt_rel = a.proc_data_dict['quantities_of_interest']['relaxation_events']
# UHFQC threshold is wrong, the magic number is a
# dirty hack. This works. we don't know why.
magic_scale_factor = 1 # 0.655
self.ro_acq_threshold(
a.proc_data_dict['threshold_raw'] *
magic_scale_factor)
self.F_ssro(a.proc_data_dict['F_assignment_raw'])
self.F_discr(a.proc_data_dict['F_discr'])
self.ro_rel_events(
a.proc_data_dict['quantities_of_interest']['relaxation_events'])
self.ro_res_ext(
a.proc_data_dict['quantities_of_interest']['residual_excitation'])
warnings.warn("FIXME rotation angle could not be set")
# self.ro_acq_rotated_SSB_rotation_angle(a.theta)
return {'SNR': a.qoi['SNR'],
'F_d': a.qoi['F_d'],
'F_a': a.qoi['F_a'],
'relaxation': a.proc_data_dict['relaxation_events'],
'excitation': a.proc_data_dict['residual_excitation']}
def measure_spectroscopy(self, freqs, mode='pulsed_marked', MC=None,
analyze=True, close_fig=True, label='',
prepare_for_continuous_wave=True):
"""
Performs a two-tone spectroscopy experiment where one tone is kept
fixed at the resonator readout frequency and another frequency is swept.
args:
freqs (array) : Frequency range you want to sweep
mode (string): 'CW' - Continuous wave
'pulsed_marked' - pulsed using trigger input of
spec source
'pulsed_mixer' - pulsed using AWG and mixer
analyze: indicates whether to look for the peak in the data
and perform a fit
label: suffix to append to the measurement label
        This experiment can be performed in three different modes:
Continuous wave (CW)
Pulsed, marker modulated
Pulsed, mixer modulated
The mode argument selects which mode is being used and redirects the
arguments to the appropriate method.
"""
if mode == 'CW':
self.measure_spectroscopy_CW(freqs=freqs, MC=MC,
analyze=analyze, close_fig=close_fig,
label=label,
prepare_for_continuous_wave=prepare_for_continuous_wave)
elif mode == 'pulsed_marked':
self.measure_spectroscopy_pulsed_marked(
freqs=freqs, MC=MC,
analyze=analyze, close_fig=close_fig,
label=label,
prepare_for_continuous_wave=prepare_for_continuous_wave)
elif mode == 'pulsed_mixer':
self.measure_spectroscopy_pulsed_mixer(
freqs=freqs, MC=MC,
analyze=analyze, close_fig=close_fig,
label=label,
prepare_for_timedomain=prepare_for_continuous_wave)
else:
            logging.error('Mode {} not recognized. Available modes: "CW", '
                          '"pulsed_marked", "pulsed_mixer"'.format(mode))
def measure_transients(self, MC=None, analyze: bool = True,
cases=('off', 'on'),
prepare: bool = True, depletion_analysis: bool = True,
depletion_analysis_plot: bool = True,
depletion_optimization_window=None,
disable_metadata: bool = False,
plot_max_time=None):
# docstring from parent class
if MC is None:
MC = self.instr_MC.get_instr()
if plot_max_time is None:
plot_max_time = self.ro_acq_integration_length()+250e-9
if prepare:
self.prepare_for_timedomain()
# off/on switching is achieved by turning the MW source on and
# off as this is much faster than recompiling/uploading
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb='on',
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
else:
p = None # object needs to exist for the openql_sweep to work
transients = []
for i, pulse_comb in enumerate(cases):
if 'off' in pulse_comb.lower():
self.instr_LO_mw.get_instr().off()
elif 'on' in pulse_comb.lower():
self.instr_LO_mw.get_instr().on()
else:
raise ValueError(
"pulse_comb {} not understood: Only 'on' and 'off' allowed.".format(pulse_comb))
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Transient time', unit='s',
upload=prepare)
MC.set_sweep_function(s)
if 'UHFQC' in self.instr_acquisition():
sampling_rate = 1.8e9
else:
raise NotImplementedError()
MC.set_sweep_points(
np.arange(self.input_average_detector.nr_samples) /
sampling_rate)
MC.set_detector_function(self.input_average_detector)
data = MC.run(
'Measure_transients{}_{}'.format(self.msmt_suffix, i),
disable_snapshot_metadata=disable_metadata)
dset = data['dset']
transients.append(dset.T[1:])
if analyze:
ma.MeasurementAnalysis()
if depletion_analysis:
a = ma.Input_average_analysis(
IF=self.ro_freq_mod(),
optimization_window=depletion_optimization_window,
plot=depletion_analysis_plot,
plot_max_time=plot_max_time)
return a
else:
return [np.array(t, dtype=np.float64) for t in transients]
def measure_rabi(self, MC=None, amps=np.linspace(0, 1, 31),
analyze=True, close_fig=True, real_imag=True,
prepare_for_timedomain=True, all_modules=False):
"""
        Perform a Rabi experiment in which the amplitude of the MW pulse is swept
        while the drive frequency and pulse duration are kept fixed
Args:
amps (array):
range of amplitudes to sweep. If cfg_with_vsm()==True pulse amplitude
is adjusted by sweeping the attenuation of the relevant gaussian VSM channel,
in max range (0.1 to 1.0).
If cfg_with_vsm()==False adjusts the channel amplitude of the AWG in range (0 to 1).
Relevant parameters:
mw_amp180 (float):
amplitude of the waveform corresponding to pi pulse (from 0 to 1)
mw_channel_amp (float):
                AWG channel amplitude (digitally scaling the waveform; from 0 to 1)
"""
if self.cfg_with_vsm():
self.measure_rabi_vsm(MC, amps,
analyze, close_fig, real_imag,
prepare_for_timedomain, all_modules)
else:
self.measure_rabi_channel_amp(MC, amps,
analyze, close_fig, real_imag,
prepare_for_timedomain)
def measure_rabi_vsm(self, MC=None, amps=np.linspace(0.1, 1.0, 31),
analyze=True, close_fig=True, real_imag=True,
prepare_for_timedomain=True, all_modules=False):
"""
        Perform a Rabi experiment in which the amplitude of the MW pulse is swept
        while the drive frequency and pulse duration are kept fixed.
Args:
amps (array):
range of amplitudes to sweep. Pulse amplitude is adjusted by sweeping
the attenuation of the relevant gaussian VSM channel,
in max range (0.1 to 1.0).
"""
if MC is None:
MC = self.instr_MC.get_instr()
if prepare_for_timedomain:
self.prepare_for_timedomain()
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb='on',
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
VSM = self.instr_VSM.get_instr()
mod_out = self.mw_vsm_mod_out()
ch_in = self.mw_vsm_ch_in()
if all_modules:
mod_sweep = []
for i in range(8):
VSM.set('mod{}_ch{}_marker_state'.format(i+1, ch_in), 'on')
G_par = VSM.parameters['mod{}_ch{}_gaussian_amp'.format(
i+1, ch_in)]
D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format(
i+1, ch_in)]
mod_sweep.append(swf.two_par_joint_sweep(
G_par, D_par, preserve_ratio=False))
s = swf.multi_sweep_function(sweep_functions=mod_sweep,
retrieve_value=True)
else:
G_par = VSM.parameters['mod{}_ch{}_gaussian_amp'.format(
mod_out, ch_in)]
D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format(
mod_out, ch_in)]
s = swf.two_par_joint_sweep(G_par, D_par, preserve_ratio=False,
retrieve_value=True, instr=VSM)
self.instr_CC.get_instr().eqasm_program(p.filename)
MC.set_sweep_function(s)
MC.set_sweep_points(amps)
        # real_imag is actually not polar and as such works for opt weights
self.int_avg_det_single._set_real_imag(real_imag)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='rabi_'+self.msmt_suffix)
ma.Rabi_Analysis(label='rabi_')
return True
def measure_rabi_channel_amp(self, MC=None, amps=np.linspace(0, 1, 31),
analyze=True, close_fig=True, real_imag=True,
prepare_for_timedomain=True):
"""
        Perform a Rabi experiment in which the amplitude of the MW pulse is swept
        while the drive frequency and pulse duration are kept fixed.
Args:
amps (array):
range of amplitudes to sweep. Amplitude is adjusted via the channel
amplitude of the AWG, in max range (0 to 1).
"""
MW_LutMan = self.instr_LutMan_MW.get_instr()
if MC is None:
MC = self.instr_MC.get_instr()
if prepare_for_timedomain:
self.prepare_for_timedomain()
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb='on',
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
s = MW_LutMan.channel_amp
MC.set_sweep_function(s)
MC.set_sweep_points(amps)
        # real_imag is actually not polar and as such works for opt weights
self.int_avg_det_single._set_real_imag(real_imag)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='rabi_'+self.msmt_suffix)
ma.Rabi_Analysis(label='rabi_')
return True
def measure_allxy(self, MC=None,
label: str = '',
analyze=True, close_fig=True,
prepare_for_timedomain=True):
# docstring from parent class
# N.B. this is a good example for a generic timedomain experiment using
# the CCL transmon.
if MC is None:
MC = self.instr_MC.get_instr()
if prepare_for_timedomain:
self.prepare_for_timedomain()
p = sqo.AllXY(qubit_idx=self.cfg_qubit_nr(), double_points=True,
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr())
d = self.int_avg_det
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(42))
MC.set_detector_function(d)
MC.run('AllXY'+label+self.msmt_suffix)
if analyze:
a = ma.AllXY_Analysis(close_main_fig=close_fig)
return a.deviation_total
def measure_T1(
self,
times=None,
update=True,
nr_cz_instead_of_idle_time: list = None,
qb_cz_instead_of_idle_time: str = None,
nr_flux_dance: float = None,
wait_time_after_flux_dance: float = 0,
prepare_for_timedomain=True,
close_fig=True,
analyze=True,
MC=None,
):
"""
N.B. this is a good example for a generic timedomain experiment using
the CCL transmon.
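
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class and the time range is a placeholder):
            qubit.measure_T1(times=np.linspace(0, 80e-6, 31), update=True)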
"""
if times and nr_cz_instead_of_idle_time:
raise ValueError("Either idle time or CZ mode must be chosen!")
if nr_cz_instead_of_idle_time is not None and not qb_cz_instead_of_idle_time:
raise ValueError("If CZ instead of idle time should be used, qubit to apply CZ to must be given!")
if qb_cz_instead_of_idle_time:
qb_cz_idx = self.find_instrument(qb_cz_instead_of_idle_time).cfg_qubit_nr()
if MC is None:
MC = self.instr_MC.get_instr()
if times is None:
if nr_cz_instead_of_idle_time is not None:
# convert given numbers of CZs into time
# NOTE: CZ time hardcoded to 40ns!
times = np.array(nr_cz_instead_of_idle_time) * 40e-9
else:
# default timing: 4 x current T1
times = np.linspace(0, self.T1() * 4, 31)
if nr_cz_instead_of_idle_time is not None:
# define time for calibration points at sufficiently distant times
dt = 10 * 40e-9 # (times[-1] - times[-2])/2
else:
# append the calibration points, times are for location in plot
dt = times[1] - times[0]
times = np.concatenate([times, (times[-1] + 1 * dt,
times[-1] + 2 * dt,
times[-1] + 3 * dt,
times[-1] + 4 * dt)])
if prepare_for_timedomain:
self.prepare_for_timedomain()
p = sqo.T1(qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn(),
times=times,
nr_cz_instead_of_idle_time=nr_cz_instead_of_idle_time,
qb_cz_idx=qb_cz_idx if qb_cz_instead_of_idle_time else None,
nr_flux_dance=nr_flux_dance,
wait_time_after_flux_dance=wait_time_after_flux_dance)
s = swf.OpenQL_Sweep(openql_program=p,
parameter_name='Time',
unit='s',
CCL=self.instr_CC.get_instr())
d = self.int_avg_det
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('T1' + self.msmt_suffix)
if analyze:
a = ma.T1_Analysis(auto=True, close_fig=True)
if update:
self.T1(a.T1)
return a.T1
def measure_T1_2nd_excited_state(self, times=None, MC=None,
analyze=True, close_fig=True, update=True,
prepare_for_timedomain=True):
"""
Performs a T1 experiment on the 2nd excited state.
"""
if MC is None:
MC = self.instr_MC.get_instr()
# default timing
if times is None:
times = np.linspace(0, self.T1() * 4, 31)
if prepare_for_timedomain:
self.prepare_for_timedomain()
# Load pulses to the ef transition
mw_lutman = self.instr_LutMan_MW.get_instr()
mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable()
p = sqo.T1_second_excited_state(times, qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
parameter_name='Time',
unit='s',
CCL=self.instr_CC.get_instr())
d = self.int_avg_det
MC.set_sweep_function(s)
MC.set_sweep_points(p.sweep_points)
MC.set_detector_function(d)
MC.run('T1_2nd_exc_state_' + self.msmt_suffix)
a = ma.T1_Analysis(auto=True, close_fig=True)
return a.T1
def measure_ramsey(self, times=None, MC=None,
artificial_detuning: float = None,
freq_qubit: float = None,
label: str = '',
prepare_for_timedomain=True,
analyze=True, close_fig=True, update=True,
detector=False,
double_fit=False,
test_beating=True):
# docstring from parent class
# N.B. this is a good example for a generic timedomain experiment using
# the CCL transmon.
if MC is None:
MC = self.instr_MC.get_instr()
# default timing
if times is None:
# funny default is because there is no real time sideband modulation
stepsize = max((self.T2_star() * 4 / 61) // (abs(self.cfg_cycle_time()))
* abs(self.cfg_cycle_time()), 40e-9)
times = np.arange(0, self.T2_star() * 4, stepsize)
if artificial_detuning is None:
# artificial_detuning = 0
# raise ImplementationError("Artificial detuning does not work, currently uses real detuning")
# artificial_detuning = 3/times[-1]
artificial_detuning = 5 / times[-1]
# append the calibration points, times are for location in plot
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1] + 1 * dt,
times[-1] + 2 * dt,
times[-1] + 3 * dt,
times[-1] + 4 * dt)])
if prepare_for_timedomain:
self.prepare_for_timedomain()
# adding 'artificial' detuning by detuning the qubit LO
if freq_qubit is None:
freq_qubit = self.freq_qubit()
# this should have no effect if artificial detuning = 0. This is a bug,
# this is real detuning, not artificial detuning
old_frequency = self.instr_LO_mw.get_instr().get('frequency')
self.instr_LO_mw.get_instr().set(
'frequency', freq_qubit -
self.mw_freq_mod.get() + artificial_detuning)
p = sqo.Ramsey(times, qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Time', unit='s')
MC.set_sweep_function(s)
MC.set_sweep_points(times)
d = self.int_avg_det
MC.set_detector_function(d)
MC.run('Ramsey' + label + self.msmt_suffix)
# Restore old frequency value
self.instr_LO_mw.get_instr().set('frequency', old_frequency)
if analyze:
a = ma.Ramsey_Analysis(auto=True, close_fig=True,
freq_qubit=freq_qubit,
artificial_detuning=artificial_detuning)
if test_beating and a.fit_res.chisqr > 0.4:
logging.warning('Found double frequency in Ramsey: large '
'deviation found in single frequency fit.'
'Trying double frequency fit.')
double_fit = True
if update:
self.T2_star(a.T2_star['T2_star'])
if double_fit:
b = ma.DoubleFrequency()
res = {
'T2star1': b.tau1,
'T2star2': b.tau2,
'frequency1': b.f1,
'frequency2': b.f2
}
return res
else:
res = {
'T2star': a.T2_star['T2_star'],
'frequency': a.qubit_frequency,
}
return res
def measure_complex_ramsey(self, times=None, MC=None,
freq_qubit: float = None,
label: str = '',
prepare_for_timedomain=True,
analyze=True, close_fig=True, update=True,
detector=False,
double_fit=False,
test_beating=True):
# docstring from parent class
# N.B. this is a good example for a generic timedomain experiment using
# the CCL transmon.
if MC is None:
MC = self.instr_MC.get_instr()
# readout must use IQ data
old_ro_type = self.ro_acq_weight_type()
self.ro_acq_weight_type('optimal IQ')
# default timing
if times is None:
# funny default is because there is no real time sideband
# modulation
stepsize = max((self.T2_star() * 4 / 61) // (abs(self.cfg_cycle_time()))
* abs(self.cfg_cycle_time()), 40e-9)
times = np.arange(0, self.T2_star() * 4, stepsize)
# append the calibration points, times are for location in plot
dt = times[1] - times[0]
times = np.concatenate([np.repeat(times, 2),
(times[-1] + 1 * dt,
times[-1] + 2 * dt,
times[-1] + 3 * dt,
times[-1] + 4 * dt)])
if prepare_for_timedomain:
self.prepare_for_timedomain()
# adding 'artificial' detuning by detuning the qubit LO
if freq_qubit is None:
freq_qubit = self.freq_qubit()
# # this should have no effect if artificial detuning = 0. This is a bug,
# This is real detuning, not artificial detuning
p = sqo.complex_Ramsey(times, qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Time', unit='s')
MC.set_sweep_function(s)
MC.set_sweep_points(times)
d = self.int_avg_det
MC.set_detector_function(d)
MC.run('complex_Ramsey' + label + self.msmt_suffix)
self.ro_acq_weight_type(old_ro_type)
if analyze:
a = ma2.ComplexRamseyAnalysis(label='complex_Ramsey', close_figs=True)
if update:
fit_res = a.fit_dicts['exp_fit']['fit_res']
fit_frequency = fit_res.params['frequency'].value
freq_qubit = self.freq_qubit()
self.freq_qubit(freq_qubit + fit_frequency)
# if test_beating and a.fit_res.chisqr > 0.4:
# logging.warning('Found double frequency in Ramsey: large '
# 'deviation found in single frequency fit.'
# 'Trying double frequency fit.')
# double_fit = True
# if update:
# self.T2_star(a.T2_star['T2_star'])
# if double_fit:
# b = ma.DoubleFrequency()
# res = {
# 'T2star1': b.tau1,
# 'T2star2': b.tau2,
# 'frequency1': b.f1,
# 'frequency2': b.f2
# }
# return res
# else:
# res = {
# 'T2star': a.T2_star['T2_star'],
# 'frequency': a.qubit_frequency,
# }
# return res
def measure_echo(self, times=None, MC=None,
analyze=True, close_fig=True, update=True,
label: str = '', prepare_for_timedomain=True):
# docstring from parent class
# N.B. this is a good example for a generic timedomain experiment using
# the CCL transmon.
if MC is None:
MC = self.instr_MC.get_instr()
# default timing
if times is None:
# funny default is because there is no real time sideband
# modulation
stepsize = max((self.T2_echo()*2/61)//(abs(self.cfg_cycle_time()))
* abs(self.cfg_cycle_time()), 20e-9)
times = np.arange(0, self.T2_echo()*4, stepsize*2)
# append the calibration points, times are for location in plot
dt = times[1] - times[0]
times = np.concatenate([times,
(times[-1]+1*dt,
times[-1]+2*dt,
times[-1]+3*dt,
times[-1]+4*dt)])
mw_lutman = self.instr_LutMan_MW.get_instr()
        # Check that pulse times lie on the 40 ns grid (2 x cfg_cycle_time)
if not all([np.round(t*1e9) % (2*self.cfg_cycle_time()*1e9) == 0 for
t in times]):
raise ValueError('timesteps must be multiples of 40e-9')
        # Check that pulse times are commensurate with the pulse modulation period
        if not all([np.round(t*1e9) % (2/self.mw_freq_mod.get()*1e9) == 0 for t in times]) and\
mw_lutman.cfg_sideband_mode() != 'real-time':
raise ValueError(
'timesteps must be multiples of 2 modulation periods')
if prepare_for_timedomain:
self.prepare_for_timedomain()
mw_lutman.load_phase_pulses_to_AWG_lookuptable()
p = sqo.echo(times, qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name="Time", unit="s")
d = self.int_avg_det
MC.set_sweep_function(s)
MC.set_sweep_points(times)
MC.set_detector_function(d)
MC.run('echo'+label+self.msmt_suffix)
if analyze:
# N.B. v1.5 analysis
a = ma.Echo_analysis_V15(label='echo', auto=True, close_fig=True)
if update:
self.T2_echo(a.fit_res.params['tau'].value)
return a
def measure_flipping(self, number_of_flips=np.arange(0, 61, 2), equator=True,
MC=None, analyze=True, close_fig=True, update=False,
ax='x', angle='180'):
"""
Measurement for fine-tuning of the pi and pi/2 pulse amplitudes. Executes sequence
pi (repeated N-times) - pi/2 - measure
        with variable number N. In this way the error in the amplitude of the MW pi pulse
        accumulates, allowing for fine tuning. Alternatively N repetitions of the pi pulse
can be replaced by 2N repetitions of the pi/2-pulse
Args:
number_of_flips (array):
number of pi pulses to apply. It is recommended to use only even numbers,
                since then the expected signal has a sine shape. Otherwise it has a (-1)^N * sin shape
which will not be correctly analyzed.
            equator (bool):
specify whether to apply the final pi/2 pulse. Setting to False makes the sequence
first-order insensitive to pi-pulse amplitude errors.
ax (str {'x', 'y'}):
                axis around which the pi pulses are to be performed. Possible values 'x' or 'y'
angle (str {'90', '180'}):
specifies whether to apply pi or pi/2 pulses. Possible values: '180' or '90'
update (bool):
specifies whether to update parameter controlling MW pulse amplitude.
This parameter is mw_vsm_G_amp in VSM case or mw_channel_amp in no-VSM case.
Update is performed only if change by more than 0.2% (0.36 deg) is needed.
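
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class):
            a = qubit.measure_flipping(number_of_flips=np.arange(0, 61, 2),
                                       ax='x', angle='180', update=True)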
"""
if MC is None:
MC = self.instr_MC.get_instr()
# allow flipping only with pi/2 or pi, and x or y pulses
assert angle in ['90','180']
assert ax.lower() in ['x', 'y']
# append the calibration points, times are for location in plot
nf = np.array(number_of_flips)
dn = nf[1] - nf[0]
nf = np.concatenate([nf,
(nf[-1]+1*dn,
nf[-1]+2*dn,
nf[-1]+3*dn,
nf[-1]+4*dn) ])
self.prepare_for_timedomain()
p = sqo.flipping(number_of_flips=nf, equator=equator,
qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn(),
ax=ax.lower(), angle=angle)
s = swf.OpenQL_Sweep(openql_program=p,
unit='#',
CCL=self.instr_CC.get_instr())
d = self.int_avg_det
MC.set_sweep_function(s)
MC.set_sweep_points(nf)
MC.set_detector_function(d)
MC.run('flipping_'+ax+angle+self.msmt_suffix)
if analyze:
a = ma2.FlippingAnalysis(
options_dict={'scan_label': 'flipping'})
if update:
# choose scale factor based on simple goodness-of-fit comparison
# This method gives priority to the line fit:
# the cos fit will only be chosen if its chi^2 relative to the
# chi^2 of the line fit is at least 10% smaller
if (a.fit_res['line_fit'].chisqr - a.fit_res['cos_fit'].chisqr)/a.fit_res['line_fit'].chisqr \
> 0.1:
scale_factor = a._get_scale_factor_cos()
else:
scale_factor = a._get_scale_factor_line()
if abs(scale_factor-1) < 1e-3:
print('Pulse amplitude accurate within 0.1%. Amplitude not updated.')
return a
if angle == '180':
if self.cfg_with_vsm():
amp_old = self.mw_vsm_G_amp()
self.mw_vsm_G_amp(scale_factor*amp_old)
else:
amp_old = self.mw_channel_amp()
self.mw_channel_amp(scale_factor*amp_old)
elif angle == '90':
amp_old = self.mw_amp90_scale()
self.mw_amp90_scale(scale_factor*amp_old)
print('Pulse amplitude for {}-{} pulse changed from {:.3f} to {:.3f}'.format(
ax, angle, amp_old, scale_factor*amp_old))
return a
def flipping_GBT(self, nr_sequence: int = 2):
'''
        Runs the flipping measurement up to nr_sequence times to calibrate the
        pi and pi/2 pulse amplitudes.
        Returns True as soon as the extracted scale factor is within 0.05% of
        unity, and False if this is not reached within nr_sequence runs.
'''
        for i in range(nr_sequence):
            a = self.measure_flipping(update=True)
            scale_factor = a._get_scale_factor_line()
            if abs(1 - scale_factor) < 0.0005:
                # pulse amplitude converged to within 0.05% of the target
                return True
        return False
def measure_motzoi(self, motzoi_amps=None,
prepare_for_timedomain: bool = True,
MC=None, analyze=True, close_fig=True):
"""
        Sweeps the DRAG coefficient, looking for reduced leakage and for optimal
        correction of the phase error due to the Stark shift resulting
        from virtual transitions to higher qubit states. In this measurement two
        pulse sequences are applied:
        X180-Y90 and Y180-X90, while the amplitude of the gaussian-derivative component
        of the MW pulse is swept. When the DRAG coefficient is adjusted correctly
        the two sequences yield the same result.
Refs:
Motzoi PRL 103, 110501 (2009)
Chow PRA 82, 040305(R) (2010)
Lucero PRA 82, 042339 (2010)
Args:
motzoi_amps (array):
DRAG coefficients to sweep over. In VSM case the amplitude
is adjusted by varying attenuation of the derivative channel for the
relevant module. In no-VSM the DRAG parameter is adjusted by reloading
of the waveform on the AWG.
Returns:
float:
                value of the DRAG parameter for which the two sequences yield the
                same result, i.e. for which the error is minimized.
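
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class):
            a = qubit.measure_motzoi(motzoi_amps=np.linspace(-.3, .3, 31))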
"""
using_VSM = self.cfg_with_vsm()
MW_LutMan = self.instr_LutMan_MW.get_instr()
AWG = MW_LutMan.AWG.get_instr()
if MC is None:
MC = self.instr_MC.get_instr()
if prepare_for_timedomain:
self.prepare_for_timedomain()
p = sqo.motzoi_XY(
qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
d = self.get_int_avg_det(single_int_avg=True, values_per_point=2,
values_per_point_suffex=['yX', 'xY'],
always_prepare=True)
if using_VSM:
VSM = self.instr_VSM.get_instr()
if motzoi_amps is None:
motzoi_amps = np.linspace(0.1, 1.0, 31)
mod_out = self.mw_vsm_mod_out()
ch_in = self.mw_vsm_ch_in()
D_par = VSM.parameters['mod{}_ch{}_derivative_amp'.format(
mod_out, ch_in)]
swf_func = wrap_par_to_swf(D_par, retrieve_value=True)
else:
if self._using_QWG():
if motzoi_amps is None:
motzoi_amps = np.linspace(-.3, .3, 31)
swf_func = swf.QWG_lutman_par(LutMan=MW_LutMan,
LutMan_parameter=MW_LutMan.mw_motzoi)
else:
if motzoi_amps is None:
motzoi_amps = np.linspace(-.3, .3, 31)
swf_func = swf.lutman_par(LutMan=MW_LutMan,
LutMan_parameter=MW_LutMan.mw_motzoi)
MC.set_sweep_function(swf_func)
MC.set_sweep_points(motzoi_amps)
MC.set_detector_function(d)
MC.run('Motzoi_XY'+self.msmt_suffix)
if analyze:
if self.ro_acq_weight_type() == 'optimal':
a = ma2.Intersect_Analysis(
options_dict={'ch_idx_A': 0,
'ch_idx_B': 1},
normalized_probability=True)
else:
# if statement required if 2 channels readout
logging.warning(
'It is recommended to do this with optimal weights')
a = ma2.Intersect_Analysis(
options_dict={'ch_idx_A': 0,
'ch_idx_B': 1},
normalized_probability=False)
return a
##########################################################################
# measure_ functions (CCLight_Transmon specific)
##########################################################################
def measure_photon_number_splitting(self, freqs, powers, MC=None,
analyze: bool = True, close_fig: bool = True):
"""
        Measures the CW qubit spectroscopy as a function of the RO pulse power
        to find photon-number splitting.
Refs:
Schuster Nature 445, 515–518 (2007)
(note that in the paper RO resonator has lower frequency than the qubit)
Args:
freqs (array):
                list of frequencies to sweep over
powers (array):
powers of the readout pulse to sweep over. The power is adjusted
by changing the amplitude of the UHFQC output channels. Thereby
the range of powers is limited by the dynamic range of mixers.
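
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class; both sweep ranges are placeholders):
            qubit.measure_photon_number_splitting(
                freqs=np.arange(5.9e9, 6.0e9, 0.5e6),
                powers=np.arange(-30, 0.1, 3))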
"""
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
CCL.stop()
p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
spec_source = self.instr_spec_source.get_instr()
spec_source.on()
MC.set_sweep_function(spec_source.frequency)
MC.set_sweep_points(freqs)
ro_lm = self.instr_LutMan_RO.get_instr()
m_amp_par = ro_lm.parameters[
'M_amp_R{}'.format(self.cfg_qubit_nr())]
s2 = swf.lutman_par_dB_attenuation_UHFQC_dig_trig(
LutMan=ro_lm, LutMan_parameter=m_amp_par)
MC.set_sweep_function_2D(s2)
MC.set_sweep_points_2D(powers)
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
label = 'Photon_number_splitting'
MC.run(name=label+self.msmt_suffix, mode='2D')
spec_source.off()
if analyze:
ma.TwoD_Analysis(label=label,
close_fig=close_fig, normalize=True)
def measure_resonator_frequency_dac_scan(self, freqs, dac_values, MC=None,
analyze: bool = True, close_fig: bool = True,
fluxChan=None, label=''):
"""
Performs the resonator spectroscopy as a function of the current applied
to the flux bias line.
Args:
freqs (array):
                list of frequencies to sweep over
dac_values (array):
list of the DAC values (current values) to sweep over
fluxChan (str):
channel of the instrument controlling the flux to sweep. By default
the channel used is specified by self.fl_dc_ch.
analyze (bool):
indicates whether to generate colormaps of the measured data
label (str):
suffix to append to the measurement label
Relevant qubit parameters:
instr_FluxCtrl (str):
instrument controlling the current bias
fluxChan (str):
                channel of the flux control instrument corresponding to the qubit
"""
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
CCL.stop()
p = sqo.CW_RO_sequence(qubit_idx=self.cfg_qubit_nr(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple(
MW_LO_source=self.instr_LO_ro.get_instr(),
IF=self.ro_freq_mod()))
MC.set_sweep_points(freqs)
if 'ivvi' in self.instr_FluxCtrl().lower():
IVVI = self.instr_FluxCtrl.get_instr()
dac_par = IVVI.parameters['dac{}'.format(self.fl_dc_ch())]
else:
# Assume the flux is controlled using an SPI rack
fluxcontrol = self.instr_FluxCtrl.get_instr()
if fluxChan == None:
dac_par = fluxcontrol.parameters[(self.fl_dc_ch())]
else:
dac_par = fluxcontrol.parameters[(fluxChan)]
MC.set_sweep_function_2D(dac_par)
MC.set_sweep_points_2D(dac_values)
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Resonator_dac_scan'+self.msmt_suffix+label, mode='2D')
if analyze:
ma.TwoD_Analysis(label='Resonator_dac_scan', close_fig=close_fig)
def measure_qubit_frequency_dac_scan(self, freqs, dac_values,
mode='pulsed_marked', MC=None,
analyze=True, fluxChan=None, close_fig=True,
nested_resonator_calibration=False,
nested_resonator_calibration_use_min=False,
resonator_freqs=None,
trigger_idx= None):
"""
Performs the qubit spectroscopy while changing the current applied
to the flux bias line.
Args:
freqs (array):
MW drive frequencies to sweep over
dac_values (array):
values of the current to sweep over
mode (str {'pulsed_mixer', 'CW', 'pulsed_marked'}):
specifies the spectroscopy mode (cf. measure_spectroscopy method)
fluxChan (str):
Fluxchannel that is varied. Defaults to self.fl_dc_ch
nested_resonator_calibration (bool):
specifies whether to track the RO resonator
frequency (which itself is flux-dependent)
nested_resonator_calibration_use_min (bool):
specifies whether to use the resonance
minimum in the nested routine
resonator_freqs (array):
                manual specification of the frequency range in which to
                search for the RO resonator in the nested routine
analyze (bool):
indicates whether to generate colormaps of the measured data
label (str):
suffix to append to the measurement label
Relevant qubit parameters:
instr_FluxCtrl (str):
instrument controlling the current bias
fluxChan (str):
                channel of the flux control instrument corresponding to the qubit
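
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class; both sweep ranges are placeholders):
            qubit.measure_qubit_frequency_dac_scan(
                freqs=np.arange(5.8e9, 6.0e9, 1e6),
                dac_values=np.linspace(-1e-3, 1e-3, 21),
                mode='pulsed_marked')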
"""
if mode == 'pulsed_mixer':
old_channel_amp = self.mw_channel_amp()
self.mw_channel_amp(1)
self.prepare_for_timedomain()
self.mw_channel_amp(old_channel_amp)
elif mode == 'CW' or mode == 'pulsed_marked':
self.prepare_for_continuous_wave()
else:
logging.error('Mode {} not recognized'.format(mode))
if MC is None:
MC = self.instr_MC.get_instr()
if trigger_idx is None:
trigger_idx = self.cfg_qubit_nr()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
if mode == 'pulsed_marked':
p = sqo.pulsed_spec_seq_marked(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn(),
trigger_idx=trigger_idx)
else:
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
if 'ivvi' in self.instr_FluxCtrl().lower():
IVVI = self.instr_FluxCtrl.get_instr()
if fluxChan is None:
dac_par = IVVI.parameters['dac{}'.format(self.fl_dc_ch())]
else:
dac_par = IVVI.parameters[fluxChan]
else:
# Assume the flux is controlled using an SPI rack
fluxcontrol = self.instr_FluxCtrl.get_instr()
if fluxChan == None:
dac_par = fluxcontrol.parameters[(self.fl_dc_ch())]
else:
dac_par = fluxcontrol.parameters[(fluxChan)]
if mode == 'pulsed_mixer':
spec_source = self.instr_spec_source_2.get_instr()
spec_source.on()
else:
spec_source = self.instr_spec_source.get_instr()
spec_source.on()
# if mode == 'pulsed_marked':
# spec_source.pulsemod_state('On')
MC.set_sweep_function(spec_source.frequency)
MC.set_sweep_points(freqs)
if nested_resonator_calibration:
res_updating_dac_par = swf.Nested_resonator_tracker(
qubit=self,
nested_MC=self.instr_nested_MC.get_instr(),
freqs=resonator_freqs,
par=dac_par, use_min=nested_resonator_calibration_use_min,
reload_sequence=True, sequence_file=p, cc=CCL)
MC.set_sweep_function_2D(res_updating_dac_par)
else:
MC.set_sweep_function_2D(dac_par)
MC.set_sweep_points_2D(dac_values)
self.int_avg_det_single._set_real_imag(False)
self.int_avg_det_single.always_prepare = True
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Qubit_dac_scan'+self.msmt_suffix, mode='2D')
if analyze:
return ma.TwoD_Analysis(label='Qubit_dac_scan',
close_fig=close_fig)
def measure_spectroscopy_CW(self, freqs, MC=None,
analyze=True, close_fig=True, label='',
prepare_for_continuous_wave=True):
"""
Does a CW spectroscopy experiment by sweeping the frequency of a
microwave source.
Relevant qubit parameters:
instr_spec_source (RohdeSchwarz_SGS100A):
instrument used to apply CW excitation
spec_pow (float):
power of the MW excitation at the output of the spec_source (dBm)
label (str):
suffix to append to the measurement label
"""
UHFQC = self.instr_acquisition.get_instr()
if prepare_for_continuous_wave:
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Starting specmode if set in config
if self.cfg_spec_mode():
UHFQC.spec_mode_on(IF=self.ro_freq_mod(),
ro_amp=self.ro_pulse_amp_CW())
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
spec_source = self.instr_spec_source.get_instr()
spec_source.on()
# Set marker mode off for CW:
if not spec_source.get_idn()['model']=='E8257D':
spec_source.pulsemod_state('Off')
MC.set_sweep_function(spec_source.frequency)
MC.set_sweep_points(freqs)
if self.cfg_spec_mode():
print('Enter loop')
MC.set_detector_function(self.UHFQC_spec_det)
else:
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='CW_spectroscopy'+self.msmt_suffix+label)
# Stopping specmode
if self.cfg_spec_mode():
UHFQC.spec_mode_off()
self._prep_ro_pulse(upload=True)
if analyze:
ma.Homodyne_Analysis(label=self.msmt_suffix, close_fig=close_fig)
def measure_spectroscopy_pulsed_marked(self, freqs, MC=None,
analyze=True, close_fig=True,
label='',
prepare_for_continuous_wave=True,
trigger_idx = None):
"""
Performs a spectroscopy experiment by triggering the spectroscopy source
with a CCLight trigger.
TODO: set the
"""
UHFQC = self.instr_acquisition.get_instr()
if prepare_for_continuous_wave:
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
# Starting specmode if set in config
if self.cfg_spec_mode():
UHFQC.spec_mode_on(IF=self.ro_freq_mod(),
ro_amp=self.ro_pulse_amp_CW())
wait_time_ns = self.spec_wait_time()*1e9
if trigger_idx is None:
trigger_idx = self.cfg_qubit_nr()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
p = sqo.pulsed_spec_seq_marked(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn(),
cc=self.instr_CC(),
trigger_idx=trigger_idx if (CCL.name.upper() == 'CCL' or CCL.name.upper() == 'CC') else 15,
wait_time_ns=wait_time_ns)
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
spec_source = self.instr_spec_source.get_instr()
spec_source.on()
        # Enable marker-driven pulse modulation for pulsed spectroscopy:
spec_source.pulsemod_state('On')
MC.set_sweep_function(spec_source.frequency)
MC.set_sweep_points(freqs)
if self.cfg_spec_mode():
MC.set_detector_function(self.UHFQC_spec_det)
else:
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='pulsed_marker_spectroscopy'+self.msmt_suffix+label)
# Stopping specmode
if self.cfg_spec_mode():
UHFQC.spec_mode_off()
self._prep_ro_pulse(upload=True)
if analyze:
ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix,
close_fig=close_fig,
qb_name=self.name)
def measure_spectroscopy_pulsed_mixer(self, freqs, MC=None,
analyze=True, close_fig=True,
label='',
prepare_for_timedomain=True):
"""
        Performs pulsed spectroscopy by modulating a CW tone with a square pulse
        generated by an AWG. Uses self.mw_LO as the spec source, as
        that usually is the LO of the AWG/QWG mixer.
        It is considered a time-domain experiment as it utilizes the AWG.
Relevant parameters:
spec_pow (float):
power of the LO fed into the mixer
spec_amp (float):
amplitude of the square waveform used to generate
microwave tone
spec_pulse_length (float):
length of the spectroscopy pulse. The length is
controlled by the qisa file, which indicates how many 20 ns long
square pulses should be triggered back-to-back
"""
UHFQC = self.instr_acquisition.get_instr()
if MC is None:
MC = self.instr_MC.get_instr()
# Starting specmode if set in config
if self.cfg_spec_mode():
UHFQC.spec_mode_on(IF=self.ro_freq_mod(),
ro_amp=self.ro_pulse_amp_CW())
# Save current value of mw_channel_amp to make this measurement
# independent of the value.
old_channel_amp = self.mw_channel_amp()
self.mw_channel_amp(1)
if prepare_for_timedomain:
self.prepare_for_timedomain()
# Snippet here to create and upload the CCL instructions
CCL = self.instr_CC.get_instr()
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
# CCL gets started in the int_avg detector
spec_source = self.instr_spec_source_2.get_instr()
# spec_source.on()
# Set marker mode off for mixer CW:
MC.set_sweep_function(spec_source.frequency)
MC.set_sweep_points(freqs)
if self.cfg_spec_mode():
print('Enter loop')
MC.set_detector_function(self.UHFQC_spec_det)
else:
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
# d = self.int_avg_det
# MC.set_detector_function(d)
MC.run(name='pulsed_mixer_spectroscopy'+self.msmt_suffix+label)
self.mw_channel_amp(old_channel_amp)
# Stopping specmode
if self.cfg_spec_mode():
UHFQC.spec_mode_off()
self._prep_ro_pulse(upload=True)
if analyze:
ma.Qubit_Spectroscopy_Analysis(label=self.msmt_suffix,
close_fig=close_fig,
qb_name=self.name)
def measure_anharmonicity(self, freqs_01=None, freqs_12=None, f_01_power=None,
f_12_power=None,
MC=None, spec_source_2=None,
mode='pulsed_marked',step_size:int= 1e6):
"""
        Measures the qubit spectroscopy as a function of the frequencies of the two
        driving tones. Qubit transitions are observed when the frequency of one
        drive matches the qubit frequency, or when the sum of the two frequencies matches
        the energy difference between the ground and second excited state. Consequently
        the frequencies of the 01 and 12 transitions can be extracted simultaneously,
        yielding an anharmonicity measurement.
Typically a good guess for the 12 transition frequencies is
f01 + alpha where alpha is the anharmonicity and typically ~ -300 MHz
Args:
freqs_01: frequencies of the first qubit drive
freqs_12: frequencies of the second qubit drive
f_01_power: power of the first qubit drive. By default the power
is set to self.spec_pow
f_12_power: power of the second qubit drive. By default the power
is set to self.spec_pow. Likely it needs to be increased
by 10-20 dB to yield meaningful result
spec_source_2: instrument used to apply second MW drive.
By default instrument specified by self.instr_spec_source_2 is used
mode (str):
if pulsed_marked uses pulsed spectroscopy sequence assuming
that the sources are pulsed using a marker.
Otherwise, uses CW spectroscopy.
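
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class; by default the sweep ranges are derived from
        freq_qubit and anharmonicity):
            qubit.measure_anharmonicity(f_12_power=qubit.spec_pow() + 15)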
"""
# f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12)
# if f_01_power == None:
# f_01_power = self.spec_pow()
# if f_12_power == None:
# f_12_power = f_01_power+20
if freqs_01 is None:
freqs_01 = self.freq_qubit()+np.arange(-20e6, 20.1e6, step_size)
if freqs_12 is None:
freqs_12 = self.freq_qubit() + self.anharmonicity() + \
np.arange(-20e6, 20.1e6, 1e6)
f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12)
if f_01_power == None:
f_01_power = self.spec_pow()
if f_12_power == None:
f_12_power = f_01_power+5
print('f_anharmonicity estimation', f_anharmonicity)
print('f_12 estimations', np.mean(freqs_12))
CCL = self.instr_CC.get_instr()
if mode == 'pulsed_marked':
p = sqo.pulsed_spec_seq_marked(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn(),
trigger_idx=0,
trigger_idx_2=9)
else:
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
if MC is None:
MC = self.instr_MC.get_instr()
if spec_source_2 is None:
spec_source_2 = self.instr_spec_source_2.get_instr()
spec_source = self.instr_spec_source.get_instr()
self.prepare_for_continuous_wave()
self.int_avg_det_single._set_real_imag(False)
spec_source.on()
if mode == 'pulsed_marked':
spec_source.pulsemod_state('On')
else:
spec_source.pulsemod_state('Off')
spec_source.power(f_01_power)
spec_source_2.on()
if mode == 'pulsed_marked':
spec_source_2.pulsemod_state('On')
else:
spec_source_2.pulsemod_state('Off')
spec_source_2.power(f_12_power)
MC.set_sweep_function(wrap_par_to_swf(
spec_source.frequency, retrieve_value=True))
MC.set_sweep_points(freqs_01)
MC.set_sweep_function_2D(wrap_par_to_swf(
spec_source_2.frequency, retrieve_value=True))
MC.set_sweep_points_2D(freqs_12)
MC.set_detector_function(self.int_avg_det_single)
MC.run_2D(name='Two_tone_'+self.msmt_suffix)
ma.TwoD_Analysis(auto=True)
spec_source.off()
spec_source_2.off()
ma.Three_Tone_Spectroscopy_Analysis(
label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12))
def measure_anharmonicity_GBT(self, freqs_01=None, freqs_12=None, f_01_power=None,
f_12_power=None,
MC=None, spec_source_2=None,
mode='pulsed_marked'):
"""
        Measures the qubit spectroscopy as a function of the frequencies of the two
        driving tones. Qubit transitions are observed when the frequency of one
        drive matches the qubit frequency, or when the sum of the two frequencies matches
        the energy difference between the ground and second excited state. Consequently
        the frequencies of the 01 and 12 transitions can be extracted simultaneously,
        yielding an anharmonicity measurement.
Typically a good guess for the 12 transition frequencies is
f01 + alpha where alpha is the anharmonicity and typically ~ -300 MHz
Args:
freqs_01: frequencies of the first qubit drive
freqs_12: frequencies of the second qubit drive
f_01_power: power of the first qubit drive. By default the power
is set to self.spec_pow
f_12_power: power of the second qubit drive. By default the power
is set to self.spec_pow. Likely it needs to be increased
by 10-20 dB to yield meaningful result
spec_source_2: instrument used to apply second MW drive.
By default instrument specified by self.instr_spec_source_2 is used
mode (str):
if pulsed_marked uses pulsed spectroscopy sequence assuming
that the sources are pulsed using a marker.
Otherwise, uses CW spectroscopy.
"""
if freqs_01 is None:
freqs_01 = self.freq_qubit()+np.arange(-30e6, 30.1e6, 0.5e6)
if freqs_12 is None:
freqs_12 = self.freq_qubit() + self.anharmonicity() + \
np.arange(-30e6, 30.1e6, 0.5e6)
f_anharmonicity = np.mean(freqs_01) - np.mean(freqs_12)
if f_01_power == None:
f_01_power = self.spec_pow()
if f_12_power == None:
f_12_power = f_01_power+20
print('f_anharmonicity estimation', f_anharmonicity)
print('f_12 estimations', np.mean(freqs_12))
CCL = self.instr_CC.get_instr()
p = sqo.pulsed_spec_seq_marked(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn(),
trigger_idx=0)
CCL.eqasm_program(p.filename)
if MC is None:
MC = self.instr_MC.get_instr()
if spec_source_2 is None:
spec_source_2 = self.instr_spec_source_2.get_instr()
spec_source = self.instr_spec_source.get_instr()
old_spec_pow = self.spec_pow()
self.prepare_for_continuous_wave()
self.int_avg_det_single._set_real_imag(False)
spec_source.on()
if mode == 'pulsed_marked':
spec_source.pulsemod_state('On')
else:
spec_source.pulsemod_state('Off')
spec_source.power(f_01_power)
spec_source_2.on()
if mode == 'pulsed_marked':
spec_source_2.pulsemod_state('On')
else:
spec_source_2.pulsemod_state('Off')
spec_source_2.power(f_12_power)
MC.set_sweep_function(wrap_par_to_swf(
spec_source.frequency, retrieve_value=True))
MC.set_sweep_points(freqs_01)
MC.set_sweep_function_2D(wrap_par_to_swf(
spec_source_2.frequency, retrieve_value=True))
MC.set_sweep_points_2D(freqs_12)
MC.set_detector_function(self.int_avg_det_single)
MC.run_2D(name='Two_tone_'+self.msmt_suffix)
ma.TwoD_Analysis(auto=True)
spec_source.off()
spec_source_2.off()
self.spec_pow(old_spec_pow)
# if analyze:
# a = ma.Three_Tone_Spectroscopy_Analysis(label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12))
# if update:
# self.anharmonicity(a.anharm)
# return a.T1
ma_obj = ma.Three_Tone_Spectroscopy_Analysis_test(
label='Two_tone', f01=np.mean(freqs_01), f12=np.mean(freqs_12))
rel_change = (abs(self.anharmonicity()) -
ma_obj.Anharm_dict['anharmonicity'])/self.anharmonicity()
threshold_for_change = 0.1
if np.abs(rel_change) > threshold_for_change:
return False
else:
return True
def measure_photon_nr_splitting_from_bus(self, f_bus, freqs_01=None,
powers=np.arange(-10, 10, 1), MC=None,
spec_source_2=None):
"""
Measures photon splitting of the qubit due to photons in the bus resonators.
Specifically it is a CW qubit pectroscopy with the second variable-power CW tone
applied at frequency f_bus.
Refs:
Schuster Nature 445, 515–518 (2007)
(note that in the paper RO resonator has lower frequency than the qubit)
Args:
f_bus: bus frequency at which variable-power CW tone is applied
freqs_01: range of frequencies of the CW qubit MW drive. If not specified
                the range -60 MHz to +5 MHz around freq_qubit will be used.
            powers: swept powers of the bus CW drive.
            spec_source_2: specifies the instrument used to apply the bus MW drive. By default
instr_spec_source_2 is used.
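
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class; the bus frequency is a placeholder):
            qubit.measure_photon_nr_splitting_from_bus(
                f_bus=7.3e9, powers=np.arange(-10, 10, 1))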
"""
if freqs_01 is None:
freqs_01 = np.arange(self.freq_qubit()-60e6,
self.freq_qubit()+5e6, 0.7e6)
self.prepare_for_continuous_wave()
if MC is None:
MC = self.instr_MC.get_instr()
CCL = self.instr_CC.get_instr()
if spec_source_2 is None:
spec_source_2 = self.instr_spec_source_2.get_instr()
spec_source = self.instr_spec_source.get_instr()
p = sqo.pulsed_spec_seq(
qubit_idx=self.cfg_qubit_nr(),
spec_pulse_length=self.spec_pulse_length(),
platf_cfg=self.cfg_openql_platform_fn())
CCL.eqasm_program(p.filename)
self.int_avg_det_single._set_real_imag(False)
spec_source.on()
spec_source.power(self.spec_pow())
spec_source_2.on()
spec_source_2.frequency(f_bus)
MC.set_sweep_function(wrap_par_to_swf(
spec_source.frequency, retrieve_value=True))
MC.set_sweep_points(freqs_01)
MC.set_sweep_function_2D(wrap_par_to_swf(
spec_source_2.power, retrieve_value=True))
MC.set_sweep_points_2D(powers)
MC.set_detector_function(self.int_avg_det_single)
MC.run_2D(name='Photon_nr_splitting'+self.msmt_suffix)
ma.TwoD_Analysis(auto=True)
spec_source.off()
spec_source_2.off()
def measure_ssro_vs_frequency_amplitude(
self, freqs=None, amps_rel=np.linspace(0, 1, 11),
nr_shots=4092*4, nested_MC=None, analyze=True,
use_optimal_weights=False, label='SSRO_freq_amp_sweep'):
"""
Measures SNR and readout fidelities as a function of the readout pulse amplitude
and frequency. Resonator depletion pulses are automatically scaled.
        Weights are not optimized; the routine is intended to be used with SSB weights.
Args:
freqs (array):
                readout frequencies to loop over
amps_rel (array):
readout pulse amplitudes to loop over. Value of 1 indicates
amplitude currently specified in the qubit object.
nr_shots (int):
total number of measurements in qubit ground and excited state
"""
warnings.warn('FIXME: Does not make use of the SSRO detector')
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
if freqs is None:
freqs = np.linspace(self.ro_freq()-4e6, self.ro_freq()+2e6, 11)
self.prepare_for_timedomain()
RO_lutman = self.instr_LutMan_RO.get_instr()
old_ro_prepare_state = self.cfg_prepare_ro_awg()
self.ro_acq_digitized(False)
self.cfg_prepare_ro_awg(False)
sweep_function = swf.lutman_par_depletion_pulse_global_scaling(
LutMan=RO_lutman,
resonator_numbers=[self.cfg_qubit_nr()],
optimization_M_amps=[self.ro_pulse_amp()],
optimization_M_amp_down0s=[self.ro_pulse_down_amp0()],
optimization_M_amp_down1s=[self.ro_pulse_down_amp1()],
upload=True
)
def measure_ssro_vs_TWPA_frequency_power(
self, pump_source, freqs, powers,
nr_shots=4092*4, nested_MC=None, analyze=True):
"""
Measures the SNR and readout fidelities as a function of the TWPA
pump frequency and power.
Args:
pump_source (RohdeSchwarz_SGS100A):
object controlling the MW source serving as TWPA pump
freqs (array):
TWPA pump frequencies to sweep over
powers (array):
list of TWPA pump powers to sweep over
nr_shots (int):
number of single-shot measurements used to estimate SNR
                and readout fidelities
"""
warnings.warn('FIXME: Does not make use of the SSRO detector')
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
self.prepare_for_timedomain()
RO_lutman = self.instr_LutMan_RO.get_instr()
old_ro_prepare_state = self.cfg_prepare_ro_awg()
self.ro_acq_digitized(False)
self.cfg_prepare_ro_awg(False)
d = det.Function_Detector(
self.measure_ssro,
msmt_kw={
'nr_shots': nr_shots,
'analyze': True, 'SNR_detector': True,
'cal_residual_excitation': True,
'prepare': False,
'disable_metadata': True
},
result_keys=['SNR', 'F_d', 'F_a']
)
nested_MC.set_sweep_function(pump_source.frequency)
nested_MC.set_sweep_points(freqs)
nested_MC.set_detector_function(d)
nested_MC.set_sweep_function_2D(pump_source.power)
nested_MC.set_sweep_points_2D(powers)
label = 'SSRO_freq_amp_sweep' + self.msmt_suffix
nested_MC.run(label, mode='2D')
self.cfg_prepare_ro_awg(old_ro_prepare_state)
if analyze:
ma.TwoD_Analysis(label=label, plot_all=True, auto=True)
def measure_ssro_vs_pulse_length(self, lengths=np.arange(100e-9, 1501e-9, 100e-9),
nr_shots=4092*4, nested_MC=None, analyze=True,
label_suffix: str = ''):
"""
Measures the SNR and readout fidelities as a function of the duration
of the readout pulse. For each pulse duration transients are
measured and optimal weights calculated.
Args:
lengths (array):
durations of the readout pulse for which SNR is measured
nr_shots (int):
number of single-shot measurements used to estimate SNR
                and readout fidelities
"""
warnings.warn('FIXME: Does not make use of the SSRO detector')
if nested_MC is None:
nested_MC = self.instr_nested_MC.get_instr()
self.ro_acq_digitized(False)
self.prepare_for_timedomain()
RO_lutman = self.instr_LutMan_RO.get_instr()
sweep_function = swf.lutman_par_UHFQC_dig_trig(
LutMan=RO_lutman,
LutMan_parameter=RO_lutman['M_length_R{}'.format(
self.cfg_qubit_nr())]
)
d = det.Function_Detector(
self.calibrate_optimal_weights,
msmt_kw={
'analyze': True,
},
result_keys=['SNR', 'F_d', 'F_a', 'relaxation', 'excitation']
)
# nested_MC.set_sweep_function(sweep_function)
nested_MC.set_sweep_function(self.ro_pulse_length)
nested_MC.set_sweep_points(lengths)
nested_MC.set_detector_function(d)
label = 'SSRO_length_sweep' + self.msmt_suffix + label_suffix
nested_MC.run(label)
if analyze:
ma.MeasurementAnalysis(label=label, plot_all=False, auto=True)
def measure_transients_CCL_switched(self, MC=None, analyze: bool = True,
cases=('off', 'on'),
prepare: bool = True, depletion_analysis: bool = True,
depletion_analysis_plot: bool = True,
depletion_optimization_window=None):
# docstring from parent class
if MC is None:
MC = self.instr_MC.get_instr()
self.prepare_for_timedomain()
# off/on switching is achieved by turning the MW source on and
# off as this is much faster than recompiling/uploading
transients = []
for i, pulse_comb in enumerate(cases):
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb=pulse_comb,
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='Transient time', unit='s',
upload=prepare)
MC.set_sweep_function(s)
if 'UHFQC' in self.instr_acquisition():
sampling_rate = 1.8e9
else:
raise NotImplementedError()
MC.set_sweep_points(
np.arange(self.input_average_detector.nr_samples) /
sampling_rate)
MC.set_detector_function(self.input_average_detector)
data = MC.run(
'Measure_transients{}_{}'.format(self.msmt_suffix, i))
dset = data['dset']
transients.append(dset.T[1:])
if analyze:
ma.MeasurementAnalysis()
if depletion_analysis:
a = ma.Input_average_analysis(
IF=self.ro_freq_mod(),
optimization_window=depletion_optimization_window,
plot=depletion_analysis_plot)
return a
else:
return [np.array(t, dtype=np.float64) for t in transients]
def measure_dispersive_shift_pulsed(self, freqs=None, MC=None, analyze: bool = True,
prepare: bool = True):
"""
Measures the RO resonator spectroscopy with the qubit in ground and excited state.
Specifically, performs two experiments. Applies sequence:
        - initialize qubit in ground state (wait)
- (only in the second experiment) apply a (previously tuned up) pi pulse
- apply readout pulse and measure
This sequence is repeated while sweeping ro_freq.
Args:
freqs (array):
                swept range of ro_freq
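
        Example (illustrative sketch; `qubit` denotes an already configured
        instance of this class; by default freqs is derived from freq_res):
            qubit.measure_dispersive_shift_pulsed()
            chi = qubit.dispersive_shift()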
"""
# docstring from parent class
if MC is None:
MC = self.instr_MC.get_instr()
if freqs is None:
if self.freq_res() is None:
                raise ValueError(
                    "Qubit has no resonator frequency.\n"
                    "Update the freq_res parameter.")
else:
freqs = self.freq_res()+np.arange(-10e6, 5e6, .1e6)
if 'optimal' in self.ro_acq_weight_type():
raise NotImplementedError(
"Change readout demodulation to SSB.")
self.prepare_for_timedomain()
# off/on switching is achieved by turning the MW source on and
# off as this is much faster than recompiling/uploading
f_res = []
for i, pulse_comb in enumerate(['off', 'on']):
p = sqo.off_on(
qubit_idx=self.cfg_qubit_nr(), pulse_comb=pulse_comb,
initialize=False,
platf_cfg=self.cfg_openql_platform_fn())
self.instr_CC.get_instr().eqasm_program(p.filename)
# CCL gets started in the int_avg detector
MC.set_sweep_function(swf.Heterodyne_Frequency_Sweep_simple(
MW_LO_source=self.instr_LO_ro.get_instr(),
IF=self.ro_freq_mod()))
MC.set_sweep_points(freqs)
self.int_avg_det_single._set_real_imag(False)
MC.set_detector_function(self.int_avg_det_single)
MC.run(name='Resonator_scan_'+pulse_comb+self.msmt_suffix)
if analyze:
ma.MeasurementAnalysis()
a = ma.Homodyne_Analysis(
label=self.msmt_suffix, close_fig=True)
# fit converts to Hz
f_res.append(a.fit_results.params['f0'].value*1e9)
if analyze:
a = ma2.Dispersive_shift_Analysis()
self.dispersive_shift(a.qoi['dispersive_shift'])
# Dispersive shift from 'hanger' fit
#print('dispersive shift is {} MHz'.format((f_res[1]-f_res[0])*1e-6))
# Dispersive shift from peak finder
print('dispersive shift is {} MHz'.format(
a.qoi['dispersive_shift']*1e-6))
return True
def measure_error_fraction(self, MC=None, analyze: bool = True,
nr_shots: int = 2048*4,
sequence_type='echo', prepare: bool = True,
feedback=False,
depletion_time=None, net_gate='pi'):
"""
        This performs a multi-round experiment; the repetition rate is defined
        by the ro_duration, which can be changed by regenerating the
        configuration file.
        The analysis counts single errors. The definition of an error is
        adapted automatically by choosing feedback or the net_gate.
        It requires high-SNR single-shot readout and a calibrated threshold.
"""
self.ro_acq_digitized(True)
if MC is None:
MC = self.instr_MC.get_instr()
# plotting really slows down SSRO (16k shots plotting is slow)
old_plot_setting = MC.live_plot_enabled()
MC.live_plot_enabled(False)
MC.soft_avg(1) # don't want to average single shots
if prepare:
self.prepare_for_timedomain()
# off/on switching is achieved by turning the MW source on and
# off as this is much faster than recompiling/uploading
p = sqo.RTE(
qubit_idx=self.cfg_qubit_nr(), sequence_type=sequence_type,
platf_cfg=self.cfg_openql_platform_fn(), net_gate=net_gate,
feedback=feedback)
self.instr_CC.get_instr().eqasm_program(p.filename)
else:
p = None # object needs to exist for the openql_sweep to work
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr(),
parameter_name='shot nr', unit='#',
upload=prepare)
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_shots))
d = self.int_log_det
MC.set_detector_function(d)
exp_metadata = {'feedback': feedback, 'sequence_type': sequence_type,
'depletion_time': depletion_time, 'net_gate': net_gate}
suffix = 'depletion_time_{}_ro_pulse_{}_feedback_{}_net_gate_{}'.format(
depletion_time, self.ro_pulse_type(), feedback, net_gate)
MC.run(
'RTE_{}_{}'.format(self.msmt_suffix, suffix),
exp_metadata=exp_metadata)
MC.live_plot_enabled(old_plot_setting)
if analyze:
a = ma2.Single_Qubit_RoundsToEvent_Analysis(
t_start=None, t_stop=None,
options_dict={'typ_data_idx': 0,
'scan_label': 'RTE'},
extract_only=True)
return {'error fraction': a.proc_data_dict['frac_single']}
def measure_msmt_induced_dephasing(self, MC=None, sequence='ramsey',
label: str = '',
verbose: bool = True,
analyze: bool = True,
close_fig: bool = True,
update: bool = True,
cross_target_qubits: list = None,
multi_qubit_platf_cfg=None,
target_qubit_excited=False,
extra_echo=False):
# docstring from parent class
# Refs:
# Schuster PRL 94, 123602 (2005)
# Gambetta PRA 74, 042318 (2006)
if MC is None:
MC = self.instr_MC.get_instr()
if cross_target_qubits is None:
platf_cfg = self.cfg_openql_platform_fn()
else:
platf_cfg = multi_qubit_platf_cfg
self.prepare_for_timedomain()
self.instr_LutMan_MW.get_instr().load_phase_pulses_to_AWG_lookuptable()
if cross_target_qubits is None:
qubits = [self.cfg_qubit_nr()]
else:
qubits = []
for cross_target_qubit in cross_target_qubits:
qubits.append(cross_target_qubit.cfg_qubit_nr())
qubits.append(self.cfg_qubit_nr())
# angles = np.arange(0, 421, 20)
angles = np.concatenate(
            [np.arange(0, 101, 20), np.arange(140, 421, 20)])
#!/usr/bin/env python3
import os
import io
import sys
import re
import numpy as np
from collections import OrderedDict
#
# ----------- Type Registry -----------
#
from ..conversion import convert_dims, convert_device, convert_dtype
_read_functions = {}
_write_functions = {}
def is_list(x):
if isinstance(x, list) or isinstance(x, tuple):
return True
return False
def register_read_function(ext, func, type=""):
global _read_functions
if is_list(ext):
for ent in ext:
register_read_function(ent, func, type)
else:
_read_functions[ext] = (type, func)
def register_write_function(ext, func, type=""):
global _write_functions
if is_list(ext):
for ent in ext:
register_write_function(ent, func, type)
else:
_write_functions[ext] = (type, func)
# Memory filesystems
_filesystems = []
def register_file_system(filesystem):
global _filesystems
if filesystem not in _filesystems:
_filesystems.append(filesystem)
def unregister_file_system(filesystem):
global _filesystems
if filesystem in _filesystems:
_filesystems.remove(filesystem)
def _open(filename, mode):
global _filesystems
if 'w' in mode:
for fs in _filesystems:
if fs.includes(filename):
if 'a' in mode:
raise NotImplementedError
if 'b' in mode: data = io.BytesIO()
else: data = io.StringIO()
data.close = lambda: None
fs[filename] = data
return data
if 'r' in mode:
for fs in _filesystems:
if fs.includes(filename):
if 'b' in mode: data = io.BytesIO(fs[filename])
else: data = io.StringIO(fs[filename])
return data
return open(filename, mode)
def _open_file_for_reading(filename, binary=True):
global _filesystems
for fs in _filesystems:
if fs.includes(filename):
data = fs[filename]
return io.BytesIO(data)
return open(filename, "r" if not binary else "rb")
def _write_file(filename, data, binary=True):
global _filesystems
for fs in _filesystems:
if fs.includes(filename):
fs[filename] = data
return
open(filename, "w" if not binary else "wb").write(data)
def _is_filesystem_file(filename):
global _filesystems
for fs in _filesystems:
if fs.includes(filename):
return True
return False
def mkdirs(path):
global _filesystems
for fs in _filesystems:
if fs.includes(path):
fs.mkdir(path)
return
from pathlib import Path
Path(path).mkdir(parents=True, exist_ok=True)
return path
def exists(filename):
global _filesystems
for fs in _filesystems:
if filename in fs:
return True
return os.path.exists(filename)
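# Illustrative sketch of how the in-memory filesystem hook can be used. The
# DictFS class below is a hypothetical example, not part of this module; any
# object with includes()/mkdir() and dict-style item access works, as assumed
# by the helpers above.
#
#   class DictFS(dict):
#       def includes(self, path):
#           return path.startswith("mem://")
#       def mkdir(self, path):
#           pass
#
#   fs = DictFS()
#   register_file_system(fs)    # "mem://..." paths now stay in memory
#   unregister_file_system(fs)  # fall back to the regular filesystem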
#
# ----------- Proxy Functions -----------
#
def read(file, *args, **kwargs):
global _read_functions
from .file import File
# Make sure file is a file
file = File(file)
# Get extension
ext = file.extension()
if ext not in _read_functions:
raise Exception(f"Don't know how to read extension '{ext}'")
# Get type and function
type, func = _read_functions[ext]
# Process arguments for specific type
if "image" in type:
alpha = kwargs.pop("alpha", None)
if "data" in type:
dtype = kwargs.pop("dtype", None)
dims = kwargs.pop("dims", "hwc")
device = kwargs.pop("device", "numpy")
# Low-level read
value = func(file.abs().str(), *args, **kwargs)
# Post-process
if "image" in type:
# Ensure we have three dimensions (hwc)
if len(value.shape) == 2:
value = np.expand_dims(value, 2)
# Correct the alpha channel if alpha=True or alpha=False is given
if alpha is True:
if value.shape[2] == 1:
value = np.concatenate((value, np.ones(value.shape, dtype=value.dtype)), axis=2)
if value.shape[2] == 3:
value = np.concatenate((value, np.ones((value.shape[0], value.shape[1], 1), dtype=value.dtype)), axis=2)
if alpha is False:
if value.shape[2] == 2:
value = value[:, :, 0:1]
if value.shape[2] == 4:
value = value[:, :, 0:3]
if "data" in type:
value = convert_dtype(value, dtype)
value = convert_dims(value, "hwc", dims)
value = convert_device(value, device)
return value
def write(file, data, *args, **kwargs):
global _write_functions
from .file import File
# Make sure file is a file
file = File(file)
ext = file.extension()
if ext is None: ext='txt' # If we don't have an extension, we assume a text file
if ext not in _write_functions:
raise Exception(f"Don't know how to write extension '{ext}'")
type, func = _write_functions[ext]
if "data" in type:
data = convert_device(data, "numpy")
if "dims" in kwargs:
dims = kwargs.pop("dims")
data = convert_dims(data, dims, "hwc")
value = func(file.abs().str(), data, *args, **kwargs)
return value
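# Hedged usage sketch: assumes the package's File helper maps "example.p" to the
# extension 'p' registered below, so both calls dispatch to the pickle handlers.
# The file name is purely illustrative.
def _example_read_write_roundtrip(path="example.p"):
    payload = {"a": 1, "b": [1, 2, 3]}
    write(path, payload)   # dispatched on the '.p' extension
    return read(path)      # returns the unpickled payload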
#
# ----------- Pickle (.p) -----------
#
def read_pickle(filename):
f = _open_file_for_reading(filename)
import pickle
return pickle.load(f)
register_read_function('p', read_pickle)
def write_pickle(filename, data):
f = io.BytesIO()
import pickle
pickle.dump(data, f)
_write_file(filename, f.getvalue())
register_write_function('p', write_pickle)
#
# ----------- Numpy (.np, .npy) -----------
#
def read_numpy(filename):
f = _open_file_for_reading(filename)
    return np.load(f, allow_pickle=True)
#-------------------------------------------------------------------------------
#
# Spherical Harmonic Coefficients.
#
# Author: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,abstract-method
from numpy import (
inf, array, zeros, dot, digitize, argsort, abs as aabs, stack,
)
from ..time_util import decimal_year_to_mjd2000, mjd2000_to_decimal_year
class SHCoefficients(object):
""" Abstract base class for the spherical harmonic coefficients. """
def __init__(self, is_internal=True, **kwargs):
self.is_internal = is_internal
self.validity = self._get_converted_validity(**kwargs)
@staticmethod
def _get_converted_validity(validity_start=None, validity_end=None,
to_mjd2000=None, **_):
""" Get validity range converted to MJD2000 using the optionally
provided conversion function.
"""
def _get_converted_value_or_default(value, default, conversion_function):
if value is None:
value = default
if conversion_function is not None:
value = conversion_function(value)
return value
return (
_get_converted_value_or_default(validity_start, -inf, to_mjd2000),
_get_converted_value_or_default(validity_end, +inf, to_mjd2000),
)
def is_valid(self, time):
""" Check if the time is within the coefficients validity range. """
validity_start, validity_end = self.validity
return (validity_start <= time) & (time <= validity_end)
@property
def degree(self):
""" Get [maximum] model degree. """
raise NotImplementedError
@property
def min_degree(self):
""" Get minimum model degree.
Below this degree all model coefficients are zero.
"""
raise NotImplementedError
def __call__(self, time, **parameters):
""" Return the matrix of the full model coefficients. """
raise NotImplementedError
class CombinedSHCoefficients(SHCoefficients):
""" Model composed of multiple coefficient sets. """
def __init__(self, *items):
if len(items) < 1:
raise ValueError(
"The composed model must be composed from at least one "
"coefficient set."
)
item = items[0]
is_internal = item.is_internal
validity_start, validity_end = item.validity
degree = item.degree
min_degree = item.min_degree
for item in items[1:]:
if is_internal != item.is_internal:
raise ValueError(
"Mixing of external and internal coefficient sets!"
)
new_start, new_end = item.validity
validity_start = max(validity_start, new_start)
validity_end = min(validity_end, new_end)
degree = max(degree, item.degree)
min_degree = min(min_degree, item.min_degree)
        SHCoefficients.__init__(
self, is_internal=is_internal, validity_start=validity_start,
validity_end=validity_end,
)
self._degree = degree
self._min_degree = min_degree
self._items = items
@property
def degree(self):
return self._degree
@property
def min_degree(self):
return self._min_degree
def __call__(self, time, **parameters):
max_degree = parameters.get("max_degree", -1)
degree = self.degree if max_degree < 0 else min(self.degree, max_degree)
coeff_full = zeros((coeff_size(degree), 2))
for item in self._items:
item_coeff, item_degree = item(time, **parameters)
coeff_full[:coeff_size(item_degree), :] += item_coeff
return coeff_full, degree
class SparseSHCoefficients(SHCoefficients):
""" Base class for sparse spherical harmonic coefficients. """
def __init__(self, indices, coefficients, **kwargs):
SHCoefficients.__init__(self, **kwargs)
n_idx, m_idx = indices[..., 0], indices[..., 1]
self._degree = n_idx.max()
self._min_degree = n_idx.min()
self._index = stack((
aabs(m_idx) + (n_idx*(n_idx + 1))//2,
(m_idx < 0).astype('int'),
n_idx,
), 1)
self._coeff = coefficients
@property
def degree(self):
return self._degree
@property
def min_degree(self):
return self._min_degree
def _subset(self, min_degree, max_degree):
""" Get subset of the coefficients for the give min. and max. degrees.
"""
default_max_degree = self._degree
default_min_degree = self._min_degree
#degree = self._degree
index = self._index
coeff = self._coeff
if max_degree < 0:
max_degree = default_max_degree
if min_degree < 0:
min_degree = default_min_degree
if min_degree > default_min_degree or max_degree < default_max_degree:
idx, = (
(index[:, 2] <= max_degree) & (index[:, 2] >= min_degree)
).nonzero()
coeff = coeff[idx]
index = index[idx]
degree = default_max_degree
if index.shape[0] > 0:
degree = index[:, 2].max()
else:
degree = 0
else:
degree = default_max_degree
return degree, coeff, index[:, 0], index[:, 1]
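# Illustrative sketch (not part of the original module): SparseSHCoefficients
# stores coefficient (n, m) at flat index |m| + n*(n+1)//2, with the sign of m
# selecting the g (m >= 0) or h (m < 0) column.  coeff_size() is assumed here to
# be the matching helper, i.e. (degree + 1) * (degree + 2) // 2 rows.
def _example_flat_index(n=2, m=-1, degree=2):
    flat = abs(m) + (n * (n + 1)) // 2           # row within the coefficient array
    column = int(m < 0)                          # 0 -> g_nm, 1 -> h_nm
    assumed_size = (degree + 1) * (degree + 2) // 2
    return flat, column, assumed_size            # (4, 1, 6) for the defaults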
class SparseSHCoefficientsConstant(SparseSHCoefficients):
""" Time invariant sparse spherical harmonic coefficients. """
def __init__(self, indices, coefficients, **kwargs):
SparseSHCoefficients.__init__(self, indices, coefficients, **kwargs)
def __call__(self, time, **parameters):
degree, coeff, index, kind = self._subset(
parameters.get("min_degree", -1), parameters.get("max_degree", -1)
)
coeff_full = zeros((coeff_size(degree), 2))
coeff_full[index, kind] = coeff
return coeff_full, degree
class SparseSHCoefficientsTimeDependent(SparseSHCoefficients):
""" Time dependent sparse spherical harmonic coefficients
evaluated by piecewise linear interpolation of a time series of
coefficients snapshots.
"""
def __init__(self, indices, coefficients, times, **kwargs):
order = argsort(times)
kwargs['validity_start'] = kwargs.get('validity_start', times[order[0]])
kwargs['validity_end'] = kwargs.get('validity_end', times[order[-1]])
self._times = _convert(times[order], kwargs.get("to_mjd2000"))
SparseSHCoefficients.__init__(
self, indices, coefficients[:, order], **kwargs
)
def __call__(self, time, **parameters):
degree, coeff, index, kind = self._subset(
parameters.get("min_degree", -1), parameters.get("max_degree", -1)
)
coeff_full = zeros((coeff_size(degree), 2))
coeff_full[index, kind] = self._interpolate_coefficients(time, coeff)
return coeff_full, degree
def _interpolate_coefficients(self, time, coeff):
""" Return interpolated coefficients. """
idx, basis = self._interpolation_basis(time)
        return dot(coeff[:, idx], basis)
import importlib
from functools import partial
import numpy as np
from highway_env import utils
def finite_mdp(env,
time_quantization=1.,
horizon=10.):
"""
Time-To-Collision (TTC) representation of the state.
The state reward is defined from a occupancy grid over different TTCs and lanes. The grid cells encode the
probability that the ego-vehicle will collide with another vehicle if it is located on a given lane in a given
duration, under the hypothesis that every vehicles observed will maintain a constant velocity (including the
ego-vehicle) and not change lane (excluding the ego-vehicle).
For instance, in a three-lane road with a vehicle on the left lane with collision predicted in 5s the grid will
be:
[0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0]
The TTC-state is a coordinate (lane, time) within this grid.
If the ego-vehicle has the ability to change its velocity, an additional layer is added to the occupancy grid
to iterate over the different velocity choices available.
Finally, this state is flattened for compatibility with the FiniteMDPEnv environment.
:param AbstractEnv env: an environment
:param time_quantization: the time quantization used in the state representation [s]
:param horizon: the horizon on which the collisions are predicted [s]
"""
# Compute TTC grid
grid = compute_ttc_grid(env, time_quantization, horizon)
# Compute current state
grid_state = (env.vehicle.speed_index(), env.vehicle.lane_index[2], 0)
state = np.ravel_multi_index(grid_state, grid.shape)
# Compute transition function
transition_model_with_grid = partial(transition_model, grid=grid)
    transition = np.fromfunction(transition_model_with_grid, grid.shape + (env.action_space.n,), dtype=int)
"""
Creates dataset of SEoEi
Author(s): <NAME> (<EMAIL>)
"""
import os
import numpy as np
from matplotlib import pyplot as plt
#plt.switch_backend('Qt5Agg')
import math
from scipy.interpolate import splev, splprep, interp1d
from scipy.integrate import cumtrapz
import sys
sys.path.append("../")
from utils import visualize
def get_sf_params(variables, alpha, beta):
'''
alpha : control nonlinearity
beta : control number of categories
'''
params = []
for v in variables:
# v = [s, t]
# Set [m, n1, n2, n3]
params.append([4+math.floor(v[0]+v[1])%beta, alpha*v[0], alpha*(v[0]+v[1]), alpha*(v[0]+v[1])])
return np.array(params)
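# Quick usage sketch (values are arbitrary): each (s, t) pair is mapped to one
# [m, n1, n2, n3] parameter row, so the result has shape (len(variables), 4).
def _example_sf_params():
    variables = np.array([[1.0, 2.0], [3.0, 0.5]])
    params = get_sf_params(variables, alpha=1.0, beta=1)
    return params.shape   # (2, 4); with beta=1 the first column is always m=4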
def r(phi, m, n1, n2, n3):
# a = b = 1, m1 = m2 = m
return ( abs(math.cos(m * phi / 4)) ** n2 + abs(math.sin(m * phi / 4)) ** n3 ) ** (-1/n1)
def interpolate(Q, N, k, D=20, resolution=1000):
''' Interpolate N points whose concentration is based on curvature. '''
res, fp, ier, msg = splprep(Q.T, u=None, k=k, s=1e-6, per=0, full_output=1)
tck, u = res
uu = np.linspace(u.min(), u.max(), resolution)
x, y = splev(uu, tck, der=0)
dx, dy = splev(uu, tck, der=1)
ddx, ddy = splev(uu, tck, der=2)
cv = np.abs(ddx*dy - dx*ddy)/(dx*dx + dy*dy)**1.5 + D
cv_int = cumtrapz(cv, uu, initial=0)
fcv = interp1d(cv_int, uu)
cv_int_samples = np.linspace(0, cv_int.max(), N)
u_new = fcv(cv_int_samples)
x_new, y_new = splev(u_new, tck, der=0)
return x_new, y_new, fp, ier
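# Illustrative only: resample an elongated ellipse so that points concentrate
# where the curvature is highest (near the ends of the major axis).  The 4:1
# aspect ratio and point counts are arbitrary choices for the demonstration.
def _example_curvature_resampling():
    phis = np.linspace(0, 2 * np.pi, 200)
    Q = np.stack((4 * np.cos(phis), np.sin(phis)), axis=1)
    x_new, y_new, fp, ier = interpolate(Q, N=64, k=3)
    return x_new, y_new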
def gen_superformula(m, n1, n2, n3, num_points=64):
phis = np.linspace(0, 2*np.pi, num_points*4)#, endpoint=False)
S = [(r(phi, m, n1, n2, n3) * math.cos(phi),
r(phi, m, n1, n2, n3) * math.sin(phi)) for phi in phis]
S = np.array(S)
# Scale the heights to 1.0
mn = np.min(S[:,1])
mx = np.max(S[:,1])
h = mx-mn
S /= h
x_new, y_new, fp, ier = interpolate(S, N=num_points, k=3)
S = np.vstack((x_new, y_new)).T
return S
def gen_ellipse(a, b, num_points=16):
phis = np.linspace(0, 2*np.pi, num_points)
E = [(a * math.cos(phi),
b * math.sin(phi)) for phi in phis]
return np.array(E)
def filt_se(superformula, ellipses):
N = ellipses.shape[0]
R_sf = np.linalg.norm(superformula, axis=-1)
Rs_sf = np.tile(np.expand_dims(R_sf, axis=0), (N, 1))
Rs_el = np.linalg.norm(ellipses, axis=-1)
# The radii of ellipse should be smaller than those of the superformula at the same angle
feas_ind = np.all(Rs_sf > Rs_el, axis=-1)
return feas_ind
def build_data(s_points=64, e_points=16):
n_s = 1000
n_eo = 11
n_ei = 1
# Superformulas
vars_sf = np.random.uniform(1.0, 10.0, size=(n_s, 2))
params = get_sf_params(vars_sf, 1.0, 1)
superformulas = []
for param in params:
try:
superformula = gen_superformula(param[0], param[1], param[2], param[3], num_points=s_points)
superformulas.append(superformula)
except ValueError:
print('Unable to interpolate.')
superformulas = np.array(superformulas)
X = []
count_s = 0
for (i, superformula) in enumerate(superformulas):
# Ellipses
        vars_eo = np.random.uniform(0.05, 0.5, (1000, 2))
# CREATED: 10/14/16 12:35 PM by <NAME> <<EMAIL>>
'''
Utility functions
=================
'''
from contextlib import contextmanager
import logging
import os
import glob
from .scaper_exceptions import ScaperError
import scipy.stats
import numpy as np
import soundfile
@contextmanager
def _close_temp_files(tmpfiles):
'''
Utility function for creating a context and closing all temporary files
once the context is exited. For correct functionality, all temporary file
handles created inside the context must be appended to the ```tmpfiles```
list.
Parameters
----------
tmpfiles : list
List of temporary file handles
'''
yield
for t in tmpfiles:
try:
t.close()
os.unlink(t.name)
except:
pass
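# Hedged usage sketch: the context guarantees that every handle appended to
# `tmpfiles` is closed and unlinked on exit, even if processing raises.
# tempfile comes from the standard library; the payload is a stand-in.
def _example_close_temp_files():
    import tempfile
    tmpfiles = []
    with _close_temp_files(tmpfiles):
        tmp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
        tmpfiles.append(tmp)
        tmp.write(b'\x00' * 16)
    # at this point the handle is closed and the temporary file removed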
@contextmanager
def _set_temp_logging_level(level):
'''
Utility function for temporarily changing the logging level using contexts.
Parameters
----------
level : str or int
The desired temporary logging level. For allowed values see:
https://docs.python.org/2/library/logging.html#logging-levels
'''
logger = logging.getLogger()
current_level = logger.level
logger.setLevel(level)
yield
logger.setLevel(current_level)
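# Minimal usage sketch: temporarily raise the root-logger threshold so that
# INFO messages are suppressed inside the context and restored afterwards.
def _example_temp_logging_level():
    with _set_temp_logging_level(logging.WARNING):
        logging.info('suppressed while inside the context')
    logging.info('visible again at the original level')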
# TODO: this needs some formalization
AUDIO_FORMATS = {f.lower() for f in soundfile.available_formats()}
AUDIO_FORMATS |= {'m4a', 'mp3'}
def _get_sorted_files(folder_path, max_depth=None):
'''
Return a list of absolute paths to all valid files contained within the
folder specified by ```folder_path```.
Parameters
----------
folder_path : str
Path to the folder to scan for files.
Returns
-------
files : list
List of absolute paths to all valid files contained within
```folder_path```.
'''
# Ensure path points to valid folder
_validate_folder_path(folder_path)
# Get folder contents and filter for valid files
# Note, we sort the list to ensure consistent behavior across operating
# systems.
files = []
for root, dirs, fs in os.walk(folder_path):
depth = os.path.relpath(root, folder_path).count(os.sep)
if max_depth is None or depth <= max_depth:
files.extend([os.path.join(root, f) for f in fs
if os.path.splitext(f)[1].strip('.') in AUDIO_FORMATS])
files.sort()
return files
def _validate_folder_path(folder_path):
'''
Validate that a provided path points to a valid folder.
Parameters
----------
folder_path : str
Path to a folder.
Raises
------
ScaperError
If ```folder_path``` does not point to a valid folder.
'''
if not os.path.isdir(folder_path):
raise ScaperError(
'Folder path "{:s}" does not point to a valid folder'.format(
str(folder_path)))
def _populate_label_list(folder_path, label_list, max_depth=None):
'''
Given a path to a folder and a list, add the names of all subfolders
contained in this folder (excluding folders whose name starts with '.') to
the provided list. This is used in scaper to populate the lists of valid
foreground and background labels, which are determined by the names of the
folders contained in ```fg_path`` and ```bg_path``` provided during
initialization.
Parameters
----------
folder_path : str
Path to a folder
label_list : list
List to which label (subfolder) names will be added.
See Also
--------
_validate_folder_path : Validate that a provided path points to a valid
folder.
'''
# get all unique labels from available files
new_label_list = {os.path.relpath(os.path.dirname(f), folder_path)
for f in _get_sorted_files(folder_path, max_depth)}
label_list.extend(list(new_label_list))
# ensure consistent ordering of labels
label_list.sort()
def _trunc_norm(mu, sigma, trunc_min, trunc_max):
'''
Return a random value sampled from a truncated normal distribution with
mean ```mu``` and standard deviation ```sigma``` whose values are limited
between ```trunc_min``` and ```trunc_max```.
Parameters
----------
mu : float
The mean of the truncated normal distribution
sig : float
The standard deviation of the truncated normal distribution
trunc_min : float
The minimum value allowed for the distribution (lower boundary)
trunc_max : float
The maximum value allowed for the distribution (upper boundary)
Returns
-------
value : float
A random value sampled from the truncated normal distribution defined
by ```mu```, ```sigma```, ```trunc_min``` and ```trunc_max```.
'''
# By default truncnorm expects a (lower boundary) and b (upper boundary)
# values for a standard normal distribution (mu=0, sigma=1), so we need
# to recompute a and b given the user specified parameters.
a, b = (trunc_min - mu) / float(sigma), (trunc_max - mu) / float(sigma)
return scipy.stats.truncnorm.rvs(a, b, mu, sigma)
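# Illustrative check of the re-parameterisation above: with mu=0, sigma=2 and
# truncation at [-1, 1], every sample must fall inside the truncation bounds.
def _example_trunc_norm(n_samples=1000):
    samples = [_trunc_norm(0, 2, -1, 1) for _ in range(n_samples)]
    assert -1 <= min(samples) and max(samples) <= 1
    return samples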
def max_polyphony(ann):
'''
Given an annotation of sound events, compute the maximum polyphony, i.e.
the maximum number of simultaneous events at any given point in time. Only
foreground events are taken into consideration for computing the polyphony.
Parameters
----------
ann : JAMS.Annotation
Returns
-------
polyphony : int
Maximum number of simultaneous events at any point in the annotation.
'''
# If there are no foreground events the polyphony is 0
roles = [obs.value['role'] for obs in ann.data]
if 'foreground' not in roles:
return 0
else:
# Keep only foreground events
int_time, int_val = ann.to_interval_values()
int_time_clean = []
for t, v in zip(int_time, int_val):
if v['role'] == 'foreground':
int_time_clean.append(t)
int_time_clean = np.asarray(int_time_clean)
# Sort and reshape
arrivals = np.sort(int_time_clean[:, 0]).reshape(-1, 1)
departures = np.sort(int_time_clean[:, 1]).reshape(-1, 1)
# Onsets are +1, offsets are -1
arrivals = np.concatenate(
(arrivals, np.ones(arrivals.shape)), axis=1)
departures = np.concatenate(
(departures, -np.ones(departures.shape)), axis=1)
# Merge arrivals and departures and sort
        entry_log = np.concatenate((arrivals, departures), axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 12:05:47 2020
@author: peter
"""
# a script to analyse the steps data.
import numpy as np
import pandas as pd
from pathlib import Path
import tifffile
import scipy.stats
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import cancer_functions as canf
import f.ephys_functions as ef
import f.general_functions as gf
import f.plotting_functions as pf
# =============================================================================
# dire = '/home/peter/data/Firefly/cancer/20201228'
# day = '20201228'
#
# def get_steps_dataframe(dire,day):
# files = Path(dire).glob('./**/*.tif')
# smr_files = []
# tif_files = []
#
# for f in files:
#
# meta = canf.load_tif_metadata(f)
# if len(meta) != 1301:
# continue
#
# #search parents for smr file from deepest to shallowest
# start = f.parts.index(day)
# for i in range(len(f.parts)-1,start+1,-1):
# direc = Path(*f.parts[:i])
# smr = [f for f in direc.glob('*.smr')]
# if len(smr) != 0:
# break
#
# smr_files.append([str(s) for s in smr])
# tif_files.append(str(f))
#
#
# max_len = max([len(x) for x in smr_files])
#
# df = pd.DataFrame()
#
# df['tif_file'] = tif_files
#
# for i in range(max_len):
# files = []
# for j in range(len(smr_files)):
# try:
# files.append(smr_files[j][i])
# except IndexError:
# files.append(np.NaN)
#
# df[f'SMR_file_{i}'] = files
#
# return df
#
# df = get_steps_dataframe(dire,day)
#
# df.to_csv('/home/peter/data/Firefly/cancer/analysis/steps_20201230.csv')
# =============================================================================
def load_steps_ephys2(stack_fname, ephys_fname):
stack = tifffile.imread(stack_fname)
n_frames = len(stack)
if Path(ephys_fname).is_file():
ephys_dict = ef.load_ephys_parse(
ephys_fname, analog_names=["LED", "vcVm", "vcIm"], event_names=["CamDown"]
)
e_start = [
float(str(ephys_dict["ephys_start"][1])[i * 2 : (i + 1) * 2])
for i in range(3)
]
e_start[-1] += (float(ephys_dict["ephys_start"][2]) / 10) / 1000
e_start = canf.lin_time(e_start)
meta = canf.load_tif_metadata(stack_fname)
frames, times = canf.get_all_frame_times(meta)
cam = ephys_dict["CamDown_times"]
cam_id = np.argmin(np.abs(cam + e_start - times[0]))
if not cam_check_steps(cam, cam_id, times, n_frames):
if cam_check_steps(cam, cam_id - 1, times, n_frames):
print("sub 1")
cam_id -= 1
elif cam_check_steps(cam, cam_id + 1, times, n_frames):
print("plus 1")
cam_id += 1
elif cam_check_steps(cam, cam_id - 2, times, n_frames):
print("sub 2")
cam_id -= 2
else:
raise ValueError("possible bad segment")
cam = cam[cam_id : cam_id + n_frames]
# slice all
sliced_cam = np.reshape(cam, (13, 100))
stack = np.reshape(stack, (13, 100) + stack.shape[-2:])
T_approx = 3 * 10**-3
# extract LED powers (use slightly longer segment)
idx1, idx2 = ef.time_to_idx(
ephys_dict["LED"], [cam[0] - T_approx * 5, cam[-1] + T_approx * 5]
)
LED_power = canf.get_LED_powers(ephys_dict["LED"][idx1:idx2], cam, T_approx)
# return LED and vm on corect segment
idx1, idx2 = ef.time_to_idx(ephys_dict["LED"], [cam[0] - T_approx, cam[-1]])
LED = canf.slice_all_ephys(ephys_dict["LED"], sliced_cam)
idx1, idx2 = ef.time_to_idx(ephys_dict["vcVm"], [cam[0] - T_approx, cam[-1]])
vcVm = canf.slice_all_ephys(ephys_dict["vcVm"], sliced_cam)
idx1, idx2 = ef.time_to_idx(ephys_dict["vcVm"], [cam[0] - T_approx, cam[-1]])
vcIm = canf.slice_all_ephys(ephys_dict["vcIm"], sliced_cam)
if LED_power[0] < LED_power[1]:
blue = 0
else:
blue = 1
result_dict = {
"cam": cam,
"LED": LED,
"im": np.mean(stack[:, blue::2], 0),
"LED_powers": LED_power,
"stack": stack,
"vcVm": vcVm,
"vcIm": vcIm,
"blue_idx": blue,
"tif_file": stack_fname,
"smr_file": ephys_fname,
}
return result_dict
df = pd.read_csv("/home/peter/data/Firefly/cancer/analysis/steps_20201230_sorted.csv")
def cam_check_steps(cam, cam_id, times, n_frames):
try:
diff = cam[cam_id : cam_id + n_frames] - times
except ValueError:
return False
if diff.max() - diff.min() < 3 * 10**-3:
return True
else:
return False
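# Illustrative check with synthetic numbers (not real acquisition data): camera
# trigger times that track the frame times to within ~3 ms pass, while a drift
# of 10 ms across the run fails.
def _example_cam_check():
    times = np.linspace(0.0, 1.0, 101)                  # fake frame times
    cam = times + 1e-4                                  # constant offset -> True
    good = cam_check_steps(cam, 0, times, len(times))
    bad = cam_check_steps(cam + np.linspace(0, 0.01, 101), 0, times, len(times))
    return good, bad                                    # (True, False)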
mean_fs = []
mean_vs = []
mean_is = []
mean_rs = []
fits = []
sens = []
for data in df.itertuples():
s = data.tif_file
trial_string = "_".join(Path(s).parts[Path(s).parts.index("cancer") : -1])
df.loc[data.Index, "trial_string"] = trial_string
trial_save = Path(
"/home/peter/data/Firefly/cancer/analysis/full",
"steps_analysis/data",
trial_string,
)
if not trial_save.is_dir():
trial_save.mkdir(parents=True)
stack_fname = data.tif_file
ephys_fname = data.SMR_file
result_dict = load_steps_ephys2(stack_fname, ephys_fname)
for key in result_dict.keys():
np.save(Path(trial_save, f"{trial_string}_{key}.npy"), result_dict[key])
tifffile.imsave(
Path(
"/home/peter/data/Firefly/cancer/analysis/full",
"steps_analysis/ims",
f"{trial_string}_im.tif",
),
gf.to_8_bit(result_dict["im"]),
)
_, roi = gf.read_roi_file(
Path(
"/home/peter/data/Firefly/cancer/analysis/full",
"steps_analysis/rois",
f"{trial_string}_roi.roi",
),
im_dims=result_dict["im"].shape[-2:],
)
roi2 = np.logical_xor(ndimage.binary_erosion(roi, iterations=4), roi)
stack = result_dict["stack"]
bl = result_dict["blue_idx"]
print(bl)
# blue start is high for some reason, exclude
stack[:, bl, ...] = stack[:, bl + 2, ...]
image = np.mean(stack[0, ...], axis=0)
interped_stack = canf.process_ratio_stacks(stack)
# now get the time courses
t_courses = gf.t_course_from_roi(interped_stack, roi)
# use linear fit for bleaching
sta = np.mean(t_courses[..., :5], -1)
    sto = np.mean(t_courses[..., -5:], -1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
### CR - 20 (The Fake Ronaldo)
import numpy as np
import cv2
import collections
import serial
from picamera.array import PiRGBArray
from picamera import PiCamera
import time, math
import imutils
nondet = 10
whitemax = 0
lol = 4
lol2 = 5
greenmax = 0
val = False
cnt_blue = 0
blue_detected = False
blue_fre = True
frequency_map = {}
centres = []
def blur(img,kernel_size):
ram = cv2.medianBlur(img,kernel_size,0)
test = cv2.GaussianBlur(ram,(kernel_size,kernel_size),0)
return test
def canny(img,low_threshold,high_threshold):
return cv2.Canny(img,low_threshold,high_threshold)
def extract_lines(img, lines, color=[255, 0, 0], thickness=2):
# X cordinates of corresponding lane
left_x = collections.defaultdict(list)
right_x = collections.defaultdict(list)
top_x = collections.defaultdict(list)
# Y cordinates of corresponding lane
left_y = collections.defaultdict(list)
right_y = collections.defaultdict(list)
top_y = collections.defaultdict(list)
try:
for line in lines:
for x1,y1,x2,y2 in line:
# Calculate slope
slope = (y2-y1)*1.0/(x2-x1)
# Grouping together slopes of variation 4
# If abs(slope) is less than 20 deg it is in the top category
if math.fabs(slope) < math.tan(np.pi/9):
top_x[int(math.atan(slope)*60/np.pi)].extend([x1,x2])
top_y[int(math.atan(slope)*60/np.pi)].extend([y1,y2])
# If slope is less than -20 deg it is in the left category
elif slope < math.tan(-np.pi/9) and slope > math.tan(-np.pi*4/9):
left_x[int(math.atan(slope)*60/np.pi)].extend([x1,x2])
left_y[int(math.atan(slope)*60/np.pi)].extend([y1,y2])
# If slope is greater than 20 deg it is in the right category
elif slope > math.tan(np.pi/9) and slope < math.tan(np.pi*4/9):
right_x[int(math.atan(slope)*60/np.pi)].extend([x1,x2])
right_y[int(math.atan(slope)*60/np.pi)].extend([y1,y2])
    except (TypeError, ZeroDivisionError):
pass
max_y = img.shape[0]
min_y = 0
eqns = [None for i in range(3)]
# Use the slope for the angle that has the maximum occurence and square fits
# the points to get an approximate line equation that passes through all the point
# Left
try:
_, left_slope = max((len(v),k) for k,v in left_x.items())
lef_l = np.poly1d(np.polyfit(left_y[left_slope],left_x[left_slope],1))
left_x_st = int(lef_l(max_y))
left_x_en = int(lef_l(min_y))
cv2.line(img,(left_x_st,max_y),(left_x_en,min_y),[255,0,0],thickness)
eqns[0]=lef_l
except:
left_slope = None
print("left ignored")
# Right
try:
_, right_slope = max((len(v),k) for k,v in right_x.items())
rig_l = np.poly1d(np.polyfit(right_y[right_slope],right_x[right_slope],1))
right_x_st = int(rig_l(max_y))
right_x_en = int(rig_l(min_y))
cv2.line(img,(right_x_st,max_y),(right_x_en,min_y),[0,255,0],thickness)
eqns[1]=rig_l
except:
right_slope = None
print("right ignored")
# Top
try:
_, top_slope = max((len(v),k) for k,v in top_x.items())
top_l = np.poly1d(np.polyfit(top_y[top_slope],top_x[top_slope],1))
top_x_st = int(top_l(max_y))
top_x_en = int(top_l(min_y))
cv2.line(img,(top_x_st,max_y),(top_x_en,min_y),[0,0,255],thickness)
eqns[2]=top_l
except:
top_slope = None
print("top ignored")
return eqns,img
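# Illustrative sketch of the slope bucketing used above: slopes flatter than
# 20 degrees count as "top", steeper negative slopes as "left", steeper
# positive slopes as "right"; anything beyond ~80 degrees is ignored.
def _example_slope_bucket(slope):
    if math.fabs(slope) < math.tan(np.pi / 9):
        return 'top'
    if math.tan(-np.pi * 4 / 9) < slope < math.tan(-np.pi / 9):
        return 'left'
    if math.tan(np.pi / 9) < slope < math.tan(np.pi * 4 / 9):
        return 'right'
    return 'ignored'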
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
# Extracts the hough lines
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
# An empty black image
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
'''
Created on 29 Sep 2014
@author: edwin
'''
import pickle, logging
import numpy as np
from scipy.sparse import coo_matrix
class DataHandler(object):
'''
classdocs
'''
#default values ####################
scores = np.array([3, 4])
K = 0
N = 0
nclasses = 2
nu0 = np.array([50.0, 50.0])
alpha0 = np.array([[2, 1], [1, 2]])
phi0 = np.array([1, 1])
gamma0 = np.array([[2, 1], [1, 2]])
a0 = 3
b0 = 1
####################################
uselowerbound = False
crowdlabels = None
table_format = False
targetidxmap = None
targetidxs = None
max_targetid = 0
trainids = None
goldlabels = None
goldsubtypes = None
output_file = None
confmat_file = None
input_file = None
gold_file = None
hyperparam_file = None
def __init__(self):
'''
Constructor
'''
def create_target_idx_map(self):
self.max_targetid = np.max(self.targetidxs) # largest original ID value
blanks = np.zeros(len(self.targetidxs)) # only need 1D so set all to zero
idxList = list(range(len(self.targetidxs))) # new local idxs
tIdxMap = coo_matrix(( idxList, (self.targetidxs,blanks)), shape=(self.max_targetid+1,1) )
self.N = len(self.targetidxs)
self.targetidxmap = tIdxMap.tocsr() # maps Original IDs to new local idxs
def loadCrowdLabels(self, scores):
'''
Loads labels from crowd in sparse list format, i.e. 3 columns, classifier ID,
object ID, score.
'''
pyFileExists = False
try:
with open(self.input_file+'.dat','r') as inFile:
crowdLabels, self.targetidxs, K = pickle.load(inFile)
pyFileExists = True
except Exception:
logging.info('Will try to load a CSV file...')
crowdLabels = np.genfromtxt(self.input_file, delimiter=',', \
skip_header=1,usecols=[0,1,2])
self.targetidxs, crowdLabels[:,1] = np.unique(crowdLabels[:,1],return_inverse=True)
kIdxs, crowdLabels[:,0] = np.unique(crowdLabels[:,0],return_inverse=True)
K = len(kIdxs)
unmappedScores = np.round(crowdLabels[:,2])
for i,s in enumerate(scores):
print(np.sum(unmappedScores==s))
crowdLabels[(unmappedScores==s),2] = i
self.create_target_idx_map()
self.crowdlabels = crowdLabels
self.K = K
print(crowdLabels.shape)
# if not pyFileExists:
# try:
# with open(self.input_file+'.dat', 'wb') as outFile:
# pickle.dump((crowdLabels,self.targetidxs,K), outFile)
# except Exception:
# logging.error('Could not save the input data as a Python object file.')
def loadCrowdTable(self, scores):
'''
Loads crowd labels in a table format
'''
unmappedScores = np.round(np.genfromtxt(self.input_file, delimiter=','))
self.K = unmappedScores.shape[1]
self.targetidxs = np.arange(unmappedScores.shape[0])
self.create_target_idx_map()
        self.crowdlabels = np.empty((self.N, self.K))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
TODO:
- Speed tests, need to be certain the looping on all telescopes is not killing
performance
- Introduce new weighting schemes
- Make intersect_lines code more readable
"""
import numpy as np
import itertools
import astropy.units as u
from ctapipe.reco.reco_algorithms import Reconstructor
from ctapipe.io.containers import ReconstructedShowerContainer
from ctapipe.coordinates import NominalFrame, HorizonFrame
from ctapipe.coordinates import TiltedGroundFrame, project_to_ground
from ctapipe.instrument import get_atmosphere_profile_functions
__all__ = [
'HillasIntersection'
]
class HillasIntersection(Reconstructor):
"""
This class is a simple re-implementation of Hillas parameter based event
reconstruction. e.g. https://arxiv.org/abs/astro-ph/0607333
In this case the Hillas parameters are all constructed in the shared
angular ( Nominal) system. Direction reconstruction is performed by
extrapolation of the major axes of the Hillas parameters in the nominal
system and the weighted average of the crossing points is taken. Core
reconstruction is performed by performing the same procedure in the
tilted ground system.
The height of maximum is reconstructed by the projection os the image
centroid onto the shower axis, taking the weighted average of all images.
Uncertainties on the positions are provided by taking the spread of the
crossing points, however this means that no uncertainty can be provided
for multiplicity 2 events.
"""
def __init__(self, atmosphere_profile_name="paranal"):
# We need a conversion function from height above ground to depth of maximum
# To do this we need the conversion table from CORSIKA
_ = get_atmosphere_profile_functions(atmosphere_profile_name)
self.thickness_profile, self.altitude_profile = _
def predict(self, hillas_parameters, tel_x, tel_y, array_direction):
"""
Parameters
----------
hillas_parameters: dict
Dictionary containing Hillas parameters for all telescopes
in reconstruction
tel_x: dict
Dictionary containing telescope position on ground for all
telescopes in reconstruction
tel_y: dict
Dictionary containing telescope position on ground for all
telescopes in reconstruction
array_direction: HorizonFrame
Pointing direction of the array
Returns
-------
ReconstructedShowerContainer:
"""
src_x, src_y, err_x, err_y = self.reconstruct_nominal(hillas_parameters)
core_x, core_y, core_err_x, core_err_y = self.reconstruct_tilted(
hillas_parameters, tel_x, tel_y)
err_x *= u.rad
err_y *= u.rad
nom = NominalFrame(x=src_x * u.rad, y=src_y * u.rad,
array_direction=array_direction)
horiz = nom.transform_to(HorizonFrame())
result = ReconstructedShowerContainer()
result.alt, result.az = horiz.alt, horiz.az
tilt = TiltedGroundFrame(x=core_x * u.m, y=core_y * u.m,
pointing_direction=array_direction)
grd = project_to_ground(tilt)
result.core_x = grd.x
result.core_y = grd.y
x_max = self.reconstruct_xmax(nom.x, nom.y,
tilt.x, tilt.y,
hillas_parameters,
tel_x, tel_y,
90 * u.deg - array_direction.alt)
result.core_uncert = np.sqrt(core_err_x * core_err_x
+ core_err_y * core_err_y) * u.m
result.tel_ids = [h for h in hillas_parameters.keys()]
result.average_size = np.mean([h.intensity for h in hillas_parameters.values()])
result.is_valid = True
src_error = np.sqrt(err_x * err_x + err_y * err_y)
result.alt_uncert = src_error.to(u.deg)
result.az_uncert = src_error.to(u.deg)
result.h_max = x_max
result.h_max_uncert = np.nan
result.goodness_of_fit = np.nan
return result
def reconstruct_nominal(self, hillas_parameters, weighting="Konrad"):
"""
Perform event reconstruction by simple Hillas parameter intersection
in the nominal system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
weighting: string
Specify image weighting scheme used (HESS or Konrad style)
Returns
-------
Reconstructed event position in the nominal system
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
# Find all pairs of Hillas parameters
combos = itertools.combinations(list(hillas_parameters.values()), 2)
hillas_pairs = list(combos)
# Copy parameters we need to a numpy array to speed things up
h1 = list(
map(
lambda h: [h[0].psi.to(u.rad).value,
h[0].x.value,
h[0].y.value,
h[0].intensity], hillas_pairs
)
)
h1 = np.array(h1)
h1 = np.transpose(h1)
h2 = list(
map(lambda h: [h[1].psi.to(u.rad).value,
h[1].x.value,
h[1].y.value,
h[1].intensity], hillas_pairs)
)
h2 = np.array(h2)
h2 = np.transpose(h2)
# Perform intersection
sx, sy = self.intersect_lines(h1[1], h1[2], h1[0],
h2[1], h2[2], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
weight_fn = self.weight_HESS
# Weight by chosen method
weight = weight_fn(h1[3], h2[3])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(sx, weights=weight)
y_pos = np.average(sy, weights=weight)
var_x = np.average((sx - x_pos) ** 2, weights=weight)
var_y = np.average((sy - y_pos) ** 2, weights=weight)
# Copy into nominal coordinate
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_tilted(self, hillas_parameters, tel_x, tel_y,
weighting="Konrad"):
"""
Core position reconstruction by image axis intersection in the tilted
system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
tel_x: dict
Telescope X positions, tilted system
tel_y: dict
Telescope Y positions, tilted system
weighting: str
Weighting scheme for averaging of crossing points
Returns
-------
(float, float, float, float):
core position X, core position Y, core uncertainty X,
core uncertainty X
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
h = list()
tx = list()
ty = list()
# Need to loop here as dict is unordered
for tel in hillas_parameters.keys():
h.append(hillas_parameters[tel])
tx.append(tel_x[tel])
ty.append(tel_y[tel])
# Find all pairs of Hillas parameters
hillas_pairs = list(itertools.combinations(h, 2))
tel_x = list(itertools.combinations(tx, 2))
tel_y = list(itertools.combinations(ty, 2))
tx = np.zeros((len(tel_x), 2))
ty = np.zeros((len(tel_y), 2))
for i, _ in enumerate(tel_x):
tx[i][0], tx[i][1] = tel_x[i][0].value, tel_x[i][1].value
ty[i][0], ty[i][1] = tel_y[i][0].value, tel_y[i][1].value
tel_x = np.array(tx)
tel_y = np.array(ty)
# Copy parameters we need to a numpy array to speed things up
h1 = map(lambda h: [h[0].psi.to(u.rad).value, h[0].intensity], hillas_pairs)
h1 = np.array(list(h1))
h1 = np.transpose(h1)
h2 = map(lambda h: [h[1].psi.to(u.rad).value, h[1].intensity], hillas_pairs)
h2 = np.array(list(h2))
h2 = np.transpose(h2)
# Perform intersection
cx, cy = self.intersect_lines(tel_x[:, 0], tel_y[:, 0], h1[0],
tel_x[:, 1], tel_y[:, 1], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
weight_fn = self.weight_HESS
# Weight by chosen method
weight = weight_fn(h1[1], h2[1])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(cx, weights=weight)
y_pos = np.average(cy, weights=weight)
var_x = np.average((cx - x_pos) ** 2, weights=weight)
var_y = np.average((cy - y_pos) ** 2, weights=weight)
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_xmax(self, source_x, source_y, core_x, core_y,
hillas_parameters, tel_x, tel_y, zen):
"""
Geometrical depth of shower maximum reconstruction, assuming the shower
maximum lies at the image centroid
Parameters
----------
source_x: float
Source X position in nominal system
source_y: float
Source Y position in nominal system
core_x: float
Core X position in nominal system
core_y: float
Core Y position in nominal system
hillas_parameters: dict
Dictionary of hillas parameters objects
tel_x: dict
Dictionary of telescope X positions
tel_y: dict
Dictionary of telescope X positions
zen: float
Zenith angle of shower
Returns
-------
float:
Estimated depth of shower maximum
"""
cog_x = list()
cog_y = list()
amp = list()
tx = list()
ty = list()
# Loops over telescopes in event
for tel in hillas_parameters.keys():
cog_x.append(hillas_parameters[tel].x.to(u.rad).value)
cog_y.append(hillas_parameters[tel].y.to(u.rad).value)
amp.append(hillas_parameters[tel].intensity)
tx.append(tel_x[tel].to(u.m).value)
ty.append(tel_y[tel].to(u.m).value)
height = get_shower_height(source_x.to(u.rad).value,
source_y.to(u.rad).value,
np.array(cog_x),
np.array(cog_y),
core_x.to(u.m).value,
core_y.to(u.m).value,
np.array(tx),
np.array(ty))
weight = np.array(amp)
mean_height = np.sum(height * weight) / np.sum(weight)
# This value is height above telescope in the tilted system,
# we should convert to height above ground
mean_height *= np.cos(zen)
# Add on the height of the detector above sea level
mean_height += 2100 # TODO: replace with instrument info
if mean_height > 100000 or np.isnan(mean_height):
mean_height = 100000
mean_height *= u.m
# Lookup this height in the depth tables, the convert Hmax to Xmax
x_max = self.thickness_profile(mean_height.to(u.km))
# Convert to slant depth
x_max /= np.cos(zen)
return x_max
@staticmethod
def intersect_lines(xp1, yp1, phi1, xp2, yp2, phi2):
"""
Perform intersection of two lines. This code is borrowed from read_hess.
Parameters
----------
xp1: ndarray
X position of first image
yp1: ndarray
Y position of first image
phi1: ndarray
Rotation angle of first image
xp2: ndarray
X position of second image
yp2: ndarray
Y position of second image
phi2: ndarray
Rotation angle of second image
Returns
-------
ndarray of x and y crossing points for all pairs
"""
sin_1 = np.sin(phi1)
cos_1 = np.cos(phi1)
a1 = sin_1
b1 = -1 * cos_1
c1 = yp1 * cos_1 - xp1 * sin_1
sin_2 = np.sin(phi2)
cos_2 = np.cos(phi2)
a2 = sin_2
b2 = -1 * cos_2
c2 = yp2 * cos_2 - xp2 * sin_2
det_ab = (a1 * b2 - a2 * b1)
det_bc = (b1 * c2 - b2 * c1)
det_ca = (c1 * a2 - c2 * a1)
# if math.fabs(det_ab) < 1e-14 : # /* parallel */
# return 0,0
xs = det_bc / det_ab
ys = det_ca / det_ab
return xs, ys
@staticmethod
def weight_konrad(p1, p2):
return (p1 * p2) / (p1 + p2)
@staticmethod
    def weight_HESS(p1, p2):
return 1 / ((1 / p1) + (1 / p2))
@staticmethod
def weight_sin(phi1, phi2):
return np.abs(np.sin(np.fabs(phi1 - phi2)))
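# Worked sketch (not part of the original module): intersect the x-axis
# (through the origin, psi = 0) with a vertical line through (1, 1)
# (psi = pi/2); the crossing point is (1, 0) up to floating-point noise.
def _example_intersect_lines():
    xs, ys = HillasIntersection.intersect_lines(
        np.array([0.0]), np.array([0.0]), np.array([0.0]),
        np.array([1.0]), np.array([1.0]), np.array([np.pi / 2]))
    return xs, ys   # approximately (array([1.]), array([0.]))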
def get_shower_height(source_x, source_y, cog_x, cog_y,
core_x, core_y, tel_pos_x, tel_pos_y):
"""
Function to calculate the depth of shower maximum geometrically under the assumption
that the shower maximum lies at the brightest point of the camera image.
Parameters
----------
source_x: float
Event source position in nominal frame
source_y: float
Event source position in nominal frame
core_x: float
Event core position in telescope tilted frame
core_y: float
Event core position in telescope tilted frame
zen: float
Zenith angle of event
Returns
-------
float: Depth of maximum of air shower
"""
# Calculate displacement of image centroid from source position (in rad)
disp = np.sqrt(np.power(cog_x - source_x, 2) +
np.power(cog_y - source_y, 2))
# Calculate impact parameter of the shower
impact = np.sqrt(np.power(tel_pos_x - core_x, 2) +
                     np.power(tel_pos_y - core_y, 2))
import matplotlib
import math
import cv2
import numpy as np
# Grid-map class
class GridMap:
def __init__(self, map_name, resolution=[0.05, 0.05, math.pi/12], origin=[0.0, 0.0, 0.0],
obstacle_thresh=254):
        # Map name
self.map_name = map_name
        # Map image
self.map_image = cv2.imread('map/' + map_name + '.png', cv2.IMREAD_GRAYSCALE)
        # Map data (after aligning the coordinate frame)
self.map_data = self.map_image.T[:, ::-1]
        # Resolution (m/pixel)
self.resolution = np.array(resolution)
        # Number of cells
self.cell_num = np.array([self.map_image.shape[0], self.map_image.shape[1], 24])
        # Minimum pose within the map
self.pose_min = np.array(origin)
        # Maximum pose within the map
self.pose_max = self.cell_num*self.resolution + self.pose_min
        # Obstacle threshold
self.obstacle_thresh = obstacle_thresh
        # Initialize the value function
self.value_data = self.init_value(self.cell_num, self.map_name)
    # Look up the value function
def value(self, pose):
index = self.to_index(pose)
value = self.value_data[index]
return value
    # Convert a pose into integer grid indices
def to_index(self, pose):
        index = np.floor((pose - self.pose_min)/self.resolution)
import os
import time
import numpy as np
import argparse
import functools
from PIL import Image
from PIL import ImageDraw
import paddle
import paddle.fluid as fluid
import reader
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('dataset', str, 'pascalvoc', "coco and pascalvoc.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('image_path', str, '', "The image used to inference and visualize.")
add_arg('model_dir', str, '', "The model path.")
add_arg('nms_threshold', float, 0.45, "NMS threshold.")
add_arg('confs_threshold', float, 0.2, "Confidence threshold to draw bbox.")
add_arg('resize_h', int, 300, "The resized image height.")
add_arg('resize_w', int, 300, "The resized image height.")
add_arg('mean_value_B', float, 127.5, "Mean value for B channel which will be subtracted.") #123.68
add_arg('mean_value_G', float, 127.5, "Mean value for G channel which will be subtracted.") #116.78
add_arg('mean_value_R', float, 127.5, "Mean value for R channel which will be subtracted.") #103.94
# yapf: enable
def infer(args, data_args, image_path, model_dir):
image_shape = [3, data_args.resize_h, data_args.resize_w]
if 'coco' in data_args.dataset:
num_classes = 91
elif 'pascalvoc' in data_args.dataset:
num_classes = 21
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=args.nms_threshold)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# yapf: disable
if model_dir:
def if_exist(var):
return os.path.exists(os.path.join(model_dir, var.name))
fluid.io.load_vars(exe, model_dir, predicate=if_exist)
# yapf: enable
infer_reader = reader.infer(data_args, image_path)
feeder = fluid.DataFeeder(place=place, feed_list=[image])
def infer():
data = infer_reader()
nmsed_out_v = exe.run(fluid.default_main_program(),
feed=feeder.feed([[data]]),
fetch_list=[nmsed_out],
return_numpy=False)
        nmsed_out_v = np.array(nmsed_out_v[0])
import sys
import time
import scipy.special
import numpy as np
#==============================================================================
# Helper functions defines here
#==============================================================================
def init():
"""
Intialize parameters that will be used in the program.
Parameters
----------
None
Returns
----------
dataset: ndarray
The whole dataset read from the input file.
outFileName: String
Name of output file.
iterNum: int
Number of iteration.
burninNum: int
Burn-in number of iterations.
obsNum: int
Number of observations, e.g., number of population.
SNPNum: int
Number of single nucleotide polymorphisms (SNPs).
PhenoNum: int
Number of phenotype types.
MAF: float
Minor Allele Frequency, should be less than 1.
"""
if len(sys.argv) != 9:
print("Number of arguments don't meet program's requirements.",
"Please Check again!")
print("You should specify inputFileName, outputFileName, iterNum,",
"burninNum, obsNum, \n\tSNPNum, PhenoNum, MAF in the arguments.")
print("Here is an example: ")
print(">>> python np_bhit.py input.txt output.npy 30000 29000 200 100 1 0.5")
quit()
dataset = np.loadtxt(sys.argv[1])
outFileName = sys.argv[2]
iterNum = int(sys.argv[3])
burninNum = int(sys.argv[4])
obsNum = int(sys.argv[5])
SNPNum = int(sys.argv[6])
PhenoNum = int(sys.argv[7])
MAF = float(sys.argv[8])
return dataset, outFileName, iterNum, burninNum, obsNum, SNPNum, PhenoNum, MAF
def unique(arr, return_counts=True):
"""
Find the unique elements of an array. Return the sorted unique elements
of an array, and the number of times each unique value comes up in the
input array if `return_counts` is True.
Parameters
----------
arr: array_like
Input array.
return_counts: bool
If True, also return the number of times each unique item appears
in `arr`.
Returns
----------
unique: ndarray
The sorted unique values.
unique_counts: ndarray
The number of times each of the unique values comes up in the
original array.
"""
arr = np.asanyarray(arr)
orig_shape, orig_dtype = arr.shape, arr.dtype
# Must reshape to a contiguous 2D array for this to work...
arr = arr.reshape(orig_shape[0], -1)
arr = np.ascontiguousarray(arr)
if arr.dtype.char in (np.typecodes['AllInteger'] +
np.typecodes['Datetime'] + 'S'):
# Optimization: Creating a view of your data with a np.void data type of
# size the number of bytes in a full row. Handles any type where items
# have a unique binary representation, i.e. 0 is only 0, not +0 and -0.
dtype = np.dtype((np.void, arr.dtype.itemsize * arr.shape[1]))
else:
dtype = [('f{i}'.format(i=i), arr.dtype) for i in range(arr.shape[1])]
try:
consolidated = arr.view(dtype)
except TypeError:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
raise TypeError(msg.format(dt=arr.dtype))
def reshape_uniq(uniq):
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(-1, *orig_shape[1:])
uniq = np.swapaxes(uniq, 0, 0)
return uniq
tmp = np.asanyarray(consolidated).flatten()
if tmp.size == 0:
if not return_counts:
output = tmp
else:
output = (tmp,)
output += (np.empty(0, np.intp),)
else:
tmp.sort()
aux = tmp
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not return_counts:
output = aux[flag]
else:
output = (aux[flag],)
idx = np.concatenate(np.nonzero(flag) + ([tmp.size],))
output += (np.diff(idx),)
if not return_counts:
return reshape_uniq(output)
else:
uniq = reshape_uniq(output[0])
return (uniq,) + output[1:]
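# Small sanity sketch: on integer genotype rows the helper above should agree
# with numpy's own row-wise unique (np.unique with axis=0, NumPy >= 1.13).
def _example_unique_rows():
    X = np.array([[1, 2], [1, 2], [3, 1]])
    uniq, counts = unique(X)
    ref_uniq, ref_counts = np.unique(X, axis=0, return_counts=True)
    return np.array_equal(uniq, ref_uniq) and np.array_equal(counts, ref_counts)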
def logLikeCont(contArr):
"""
Calculate logarithmic likelihood of only continuous variates given the
data array. Variates are assumed to be independent if there is only
one column, and from a multivariate Gaussian distribution if there are
multiple columns.
Parameters
----------
contArr: array_like
Input array.
Returns
----------
logProb: float
Logarithmic likelihood of continuous variates.
"""
Y = np.asanyarray(contArr)
obsNum, varNum = Y.shape
logProb = 0
if varNum == 0:
pass
elif varNum == 1:
sigma = 1
mean = np.average(Y)
nuVar = NU0 * sigma**2
nuVar += (obsNum-1) * np.var(Y)
nuVar += KAPPA0*obsNum/(KAPPA0+obsNum) * (mean-MU0)**2
logProb = -1*np.log(2*np.pi)*obsNum/2 + np.log(KAPPA0/(KAPPA0 + obsNum)) / 2 + scipy.special.gammaln((NU0+obsNum)/2)
logProb += (-1*scipy.special.gammaln(NU0/2) + np.log(NU0*sigma**2/2)*NU0/2 - np.log(nuVar/2) * (NU0+obsNum)/2)
# The below code was not fully tested.
else:
means = np.average(Y, axis=0)
lambda_arr = np.diag([1]*varNum)
diff = np.array(means - MU0)[:,None]
lambdaN = (lambda_arr + KAPPA0 * obsNum / (KAPPA0+obsNum)
* diff.dot(diff.transpose()))
lambdaN += (obsNum-1)*np.cov(Y, rowvar=False, bias=False)
logProb = (-np.log(np.pi) * obsNum * varNum / 2 + np.log(KAPPA0/
(KAPPA0 + obsNum) * varNum / 2))
logProb += np.log(np.linalg.det(lambda_arr)) * NU0/2
logProb -= np.log(np.linalg.det(lambdaN)) * (NU0+obsNum)/2
logProb += np.sum(scipy.special.gammaln((NU0+obsNum)/2 -
np.arange(varNum)/2) - scipy.special.gammaln(NU0/2 -
np.arange(varNum)/2))
return logProb
def logLikeDisc(discArr):
"""
Calculate logarithmic likelihood of only discrete variates given the
data array. Variates are assumed to be independent if there is only
one column, and from a joint Dirichlet distribution if there are
multiple columns.
Parameters
----------
discArr: array_like
Input array.
Returns
----------
logProb: float
Logarithmic likelihood of discrete variates.
"""
X = np.asanyarray(discArr)
uniqueArr, N = unique(X)
alpha = Odds[uniqueArr-1]
alpha = np.prod(alpha, axis=1)
n_plus_alpha = N + alpha
logProb = np.sum(scipy.special.gammaln(n_plus_alpha) - scipy.special.gammaln(alpha))
logProb -= scipy.special.gammaln(np.sum(n_plus_alpha))
return logProb
def logLikeDepe(discArr, contArr):
"""
Calculate logarithmic likelihood of partitions with both continuous and
discrete variates by finding the continous rows corresponding to unique
discrete observations and calculating the probability those continous
observations came from a single multivariate Gaussian distribution.
Parameters
----------
discArr: array_like
Input discrete array.
contArr: array_like
Input continous array.
Returns
----------
logProb: float
Logarithmic likelihood.
"""
X = np.asanyarray(discArr)
Y = np.asanyarray(contArr)
variations = unique(X, return_counts=False)
logProb = 0
for v in variations:
corres_row = np.prod((X==v), axis=1)
corres_Y = Y[corres_row==1]
logProb += logLikeCont(corres_Y)
logProb += logLikeDisc(X)
return logProb
def metroHast(iterNum, burninNum):
"""
The Metropolis–Hastings algorithm is a Markov chain Monte Carlo (MCMC)
method for obtaining a sequence of random samples from a probability
distribution for which direct sampling is difficult. The reult returned
is the final probability matrix for each covariate.
Parameters
----------
iterNum: number
Number of iteration of MCMC.
burninNum: number
Number of burn-in of MCMC.
Returns
----------
mhResult: ndarray
Final partition matrix for each covariate.
Ix: ndarray
Final index vector.
"""
Ix = np.arange(TotalNum)
Dx = Ix[:SNPNum]
Cx = Ix[SNPNum:TotalNum]
iN = np.zeros([TotalNum, TotalNum+1])
# Uncomment if you want to trace probabilities.
# trace = np.zeros(iterNum)
    # trace[0] += np.sum([logLikeDisc(Genotype[:, Dx==col]) for col in range(SNPNum)])
    # trace[0] += np.sum([logLikeCont(Phenotype[:, Cx==col]) for col in range(SNPNum,TotalNum)])
# Main Metropolis-Hastings loop.
for i in range(1, iterNum):
# Select an index, then change it to another index randomly.
while True:
# Sort the number to ensure changing from small index to big one.
x, y = np.sort(np.random.choice(Ix, 2, False))
k = np.where(Ix == x)[0]
if len(k) > 1:
k = np.random.choice(k, 1)
Iy = np.array(Ix)
Iy[k] = y
tmp1 = np.where(Ix == x)[0]
tmp2 = np.where(Iy == y)[0]
if (len(tmp1)>1 or len(tmp2)>1):
break
# Create the proposed indicator vector.
Dy = Iy[:SNPNum]
Cy = Iy[SNPNum:TotalNum]
Cxx = Phenotype[:,Cx == x]
Cxy = Phenotype[:,Cx == y]
Cyx = Phenotype[:,Cy == x]
Cyy = Phenotype[:,Cy == y]
Dxx = Genotype[:,Dx == x]
Dxy = Genotype[:,Dx == y]
Dyx = Genotype[:,Dy == x]
Dyy = Genotype[:,Dy == y]
# Calculate log likelihoods.
old_prob = 0
new_prob = 0
# Likelihood of current partition x.
if Cxx.size != 0:
if Dxx.size != 0:
old_prob += logLikeDepe(Dxx, Cxx)
else:
old_prob += logLikeCont(Cxx)
elif Dxx.size != 0:
old_prob += logLikeDisc(Dxx)
# Likelihood of current partition x.
if Cxy.size != 0:
if Dxy.size != 0:
old_prob += logLikeDepe(Dxy, Cxy)
else:
old_prob += logLikeCont(Cxy)
elif Dxy.size != 0:
old_prob += logLikeDisc(Dxy)
# Likelihood of proposed partition y.
if Cyx.size != 0:
if Dyx.size != 0:
new_prob += logLikeDepe(Dyx, Cyx)
else:
new_prob += logLikeCont(Cyx)
elif Dyx.size != 0:
new_prob += logLikeDisc(Dyx)
# Likelihood of proposed partition y.
if Cyy.size != 0:
if Dyy.size != 0:
new_prob += logLikeDepe(Dyy, Cyy)
else:
new_prob += logLikeCont(Cyy)
elif Dyy.size != 0:
new_prob += logLikeDisc(Dyy)
# Uncomment if you want to trace probabilities.
# trace[i] = trace[i-1]
# Check if proposal is accepted, if so, update everything.
accept = np.log(np.random.rand()) <= min(0, new_prob-old_prob)
if accept:
Ix = np.array(Iy)
Cx = np.array(Cy)
            Dx = np.array(Dy)
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 16:24:16 2020
@author: mclea
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import convolve2d
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
from matplotlib import cm
import kernprof
from line_profiler import LineProfiler
from scipy.sparse import csr_matrix
import time
def restrict(A):
"""
Creates a new grid of points which is half the size of the original
grid in each dimension.
"""
n = A.shape[0]
m = A.shape[1]
new_n = int((n-2)/2+2)
new_m = int((m-2)/2+2)
new_array = np.zeros((new_n, new_m))
for i in range(1, new_n-1):
for j in range(1, new_m-1):
ii = int((i-1)*2)+1
jj = int((j-1)*2)+1
# print(i, j, ii, jj)
new_array[i,j] = np.average(A[ii:ii+2, jj:jj+2])
new_array = set_BC(new_array)
return new_array
def interpolate_array(A):
"""
Creates a grid of points which is double the size of the original
grid in each dimension. Uses linear interpolation between grid points.
"""
n = A.shape[0]
m = A.shape[1]
new_n = int((n-2)*2 + 2)
new_m = int((m-2)*2 + 2)
new_array = np.zeros((new_n, new_m))
    i = (np.indices(A.shape)
# Forward: given model/pde parameters λ -> u(t, x)
import time, sys, os, json
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
# from plotting import newfig, savefig
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from pyDOE import lhs
# from scipy.interpolate import griddata
# import scipy.io
sys.path.insert(0, '../../Utilities/') # for plotting
# from plotting import newfig, savefig
# np.random.seed(1234)
# tf.set_random_seed(1234)
class PhysicsInformedNN:
# Initialize the class
def __init__(self, xb, yb, x0, xe, y0, ye, boundaryU, boundaryU_, xf_grid, yf_grid, layers, lowerbound, upperbound, mix):
self.mix = mix
self.xb = xb
self.yb = yb
self.x0 = x0
self.xe = xe
self.y0 = y0
self.ye = ye
self.ul = boundaryU[0]
self.ur = boundaryU[1]
self.ub = boundaryU[2]
self.ut = boundaryU[3]
if self.mix:
self.ul_x = boundaryU_[0]
self.ur_x = boundaryU_[1]
self.ub_y = boundaryU_[2]
self.ut_y = boundaryU_[3]
self.xf_grid = xf_grid
self.yf_grid = yf_grid
self.lowerbound = lowerbound
self.upperbound = upperbound
self.layers = layers
# Initialize NN
self.weights, self.biases = self.initialize_NN(layers)
# shape: (N_b, 1)
self.xb_tf = tf.placeholder(tf.float32, shape=[None, self.xb.shape[1]])
self.yb_tf = tf.placeholder(tf.float32, shape=[None, self.yb.shape[1]])
self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]])
self.xe_tf = tf.placeholder(tf.float32, shape=[None, self.xe.shape[1]])
self.y0_tf = tf.placeholder(tf.float32, shape=[None, self.y0.shape[1]])
self.ye_tf = tf.placeholder(tf.float32, shape=[None, self.ye.shape[1]])
self.ul_tf = tf.placeholder(tf.float32, shape=[None, self.ul.shape[1]])
self.ur_tf = tf.placeholder(tf.float32, shape=[None, self.ur.shape[1]])
self.ub_tf = tf.placeholder(tf.float32, shape=[None, self.ub.shape[1]])
self.ut_tf = tf.placeholder(tf.float32, shape=[None, self.ut.shape[1]])
if self.mix:
self.ul_x_tf = tf.placeholder(tf.float32, shape=[None, self.ul_x.shape[1]])
self.ur_x_tf = tf.placeholder(tf.float32, shape=[None, self.ur_x.shape[1]])
self.ub_y_tf = tf.placeholder(tf.float32, shape=[None, self.ub_y.shape[1]])
self.ut_y_tf = tf.placeholder(tf.float32, shape=[None, self.ut_y.shape[1]])
# shape: (N_f * N_f, 1) because in net_all: X = tf.concat([x,y],1)
self.xf_grid_tf = tf.placeholder(tf.float32, shape=[None, self.xf_grid.shape[1]])
self.yf_grid_tf = tf.placeholder(tf.float32, shape=[None, self.yf_grid.shape[1]])
self.lr_tf = tf.placeholder(tf.float32)
# tf Graphs: u, u_x, u_y, f = net_all(x, y)
self.ul_pred, self.ul_x_pred, _, _ = self.net_all(self.x0_tf, self.yb_tf)
self.ur_pred, self.ur_x_pred, _, _ = self.net_all(self.xe_tf, self.yb_tf)
self.ub_pred, _, self.ub_y_pred, _ = self.net_all(self.xb_tf, self.y0_tf)
self.ut_pred, _, self.ut_y_pred, _ = self.net_all(self.xb_tf, self.ye_tf)
# used in predict (only call net_all once)
self.uf_pred, _, _, self.f_pred = self.net_all(self.xf_grid_tf, self.yf_grid_tf)
# Loss: boundary(u, u_x, u_y) + PDE (f = u_xx + u_yy = 0)
if not self.mix: # purely u for boundary condition
self.loss = tf.reduce_mean(tf.square(self.ul_tf - self.ul_pred)) + \
tf.reduce_mean(tf.square(self.ur_tf - self.ur_pred)) + \
tf.reduce_mean(tf.square(self.ub_tf - self.ub_pred)) + \
tf.reduce_mean(tf.square(self.ut_tf - self.ut_pred)) + \
tf.reduce_mean(tf.square(self.f_pred))
else: # mix of u and u_x, u_y for boundary condition
self.loss = tf.reduce_mean(tf.square(self.ul_x_tf - self.ul_x_pred)) + \
tf.reduce_mean(tf.square(self.ur_tf - self.ur_pred)) + \
tf.reduce_mean(tf.square(self.ub_tf - self.ub_pred)) + \
tf.reduce_mean(tf.square(self.ut_y_tf - self.ut_y_pred)) + \
tf.reduce_mean(tf.square(self.f_pred))
# tf.reduce_mean: computes the mean of elements across dimensions of a tensor
# Optimizers:
# return a minimization Op (a graph node that performs computation on tensors) -> updates weights and biases
self.train_op_Adam = tf.train.AdamOptimizer(learning_rate = self.lr_tf).minimize(self.loss)
# tf session: initiates a tf Graph (defines computations) that processes tensors through operations + allocates resources + holds intermediate values
self.sess = tf.Session()
# variables now hold the values from declarations: tf.Variable(tf.zeros(...)), tf.Variable(tf.random_normal(...)), etc
init = tf.global_variables_initializer()
self.sess.run(init) # required to initialize the variables
def initialize_NN(self, layers):
weights = []
biases = []
num_layers = len(layers)
for l in range(0, num_layers-1):
            # tf.Variable: for trainable variables/mutable tensor values that persist across multiple session.run() calls
# https://towardsdatascience.com/understanding-fundamentals-of-tensorflow-program-and-why-it-is-necessary-94cf5b60e255
weights.append(self.xavier_init(size=[layers[l], layers[l+1]]))
biases.append(tf.Variable(tf.zeros([1, layers[l+1]], dtype=tf.float32), dtype=tf.float32)) # all zeros
return weights, biases
def xavier_init(self, size):
# https://towardsdatascience.com/weight-initialization-in-neural-networks-a-journey-from-the-basics-to-kaiming-954fb9b47c79
# Want each layer's activation outputs to have stddev around 1 -> repeat matrix mult across as many layers without activations exploding or vanishing
in_dim = size[0]
out_dim = size[1]
xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        # random values from a truncated normal distribution (values whose magnitude is > 2 stddev from the mean are dropped and re-picked)
# Shape of the output tensor: [layers[l], layers[l+1]]
return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
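    # For illustration (not used by the code): with layers = [2, 50, 50, 50, 1],
    # the first weight matrix is drawn with xavier_stddev = sqrt(2/(2+50)) ~ 0.196
    # and the hidden-to-hidden matrices with sqrt(2/(50+50)) ~ 0.141.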
def neural_net(self, X, weights, biases):
num_layers = len(weights) + 1
H = 2.0*(X - self.lowerbound)/(self.upperbound - self.lowerbound) - 1 # Initializing first input: mapping to [-1, 1]
for l in range(0, num_layers-2):
W = weights[l]
b = biases[l]
H = tf.tanh(tf.add(tf.matmul(H, W), b)) # passing along networks
# NOTE: H*W=(50, 20) + B(1, 20) -> tf does broadcasting: B becomes (50, 20)
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b) # passed 5 times in total
return Y
def net_all(self, x, y):
X = tf.concat([x, y], 1) # input
# x = [[-0.5], [0.5]] # y = [[0], [1]]
# [[-0.5, 0]
# [0.5, 1]]
u = self.neural_net(X, self.weights, self.biases)
u_x = tf.gradients(u, x)[0]
u_xx = tf.gradients(u_x, x)[0]
u_y = tf.gradients(u, y)[0]
u_yy = tf.gradients(u_y, y)[0]
f = u_xx + u_yy # f = u_xx + u_yy = 0
return u, u_x, u_y, f
def callback(self, loss):
print('Loss:', loss)
def train(self, lr): # one iteration: uses all training data from tf_dict and updates weights and biases
tf_dict = { self.x0_tf: self.x0, self.xe_tf: self.xe, self.xb_tf: self.xb,
self.y0_tf: self.y0, self.ye_tf: self.ye, self.yb_tf: self.yb,
self.ul_tf: self.ul, self.ur_tf: self.ur, self.ub_tf: self.ub, self.ut_tf: self.ut,
self.xf_grid_tf: self.xf_grid, self.yf_grid_tf: self.yf_grid,
self.lr_tf: lr}
if self.mix:
tf_dict.update({
self.ul_x_tf: self.ul_x, self.ur_x_tf: self.ur_x,
self.ub_y_tf: self.ub_y, self.ut_y_tf: self.ut_y
})
# feeding training examples during training and running the minimization Op of self.loss
self.sess.run(self.train_op_Adam, tf_dict)
loss_value = self.sess.run(self.loss, tf_dict)
return loss_value
def predict(self, x_grid, y_grid): # tf.concat([x, y], 1)
tf_dict = {self.xf_grid_tf: x_grid, self.yf_grid_tf: y_grid}
u, f = self.sess.run([self.uf_pred, self.f_pred], tf_dict)
return u, f
def contourPlot(xtest_mesh, ytest_mesh, u_test, u_pred, N_test, i):
fig = plt.figure(figsize=(6.0, 5.3))
gs = gridspec.GridSpec(nrows=2, ncols=3, figure=fig, width_ratios=[6, 6, 0.6], height_ratios=[1, 1], wspace=0.41, hspace=0.33)
ax = fig.add_subplot(gs[0, 0])
cset1 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(u_test, (N_test, N_test)), levels=30, cmap='winter')
plt.gca().set(xlabel='$x$', ylabel='$y$', title='Exact') # $: mathematical font like latex
ax = fig.add_subplot(gs[0, 1])
cset2 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(u_pred, (N_test, N_test)), levels=30, cmap='winter')
plt.gca().set(xlabel='$x$', ylabel='$y$', title='Prediction')
ax = fig.add_subplot(gs[0, 2])
fig.colorbar(cset2, cax=ax)
ax = fig.add_subplot(gs[1, 0:2])
cset3 = ax.contourf(xtest_mesh, ytest_mesh, np.reshape(np.abs(u_pred-u_test), (N_test, N_test)), levels=30, cmap='autumn')
plt.gca().set(xlabel='$x$', ylabel='$y$', title='|Prediction - Exact|')
ax = fig.add_subplot(gs[1, 2])
fig.colorbar(cset3, cax=ax)
plt.suptitle(f'Snapshot at Iteration = {i+1}')
fig.subplots_adjust(left=0.09, right=0.89, bottom=0.08, top=0.90)
plt.savefig(f'{dirpath}/forward_2d_contour_iter{i+1}_new.pdf')
plt.close(fig)
with open(f'{dirpath}/forward_2d_contour_upred_iter{i+1}.json', 'w') as f:
json.dump(u_pred.tolist(), f)
def axisToGrid(x, y): # [[0] [0.5] [1]] (N, 1)
x_mesh, y_mesh = np.meshgrid(x, y) # [[0 0.5 1] [0 0.5 1] [0 0.5 1]], [[0 0 0] [0.5 0.5 0.5] [1 1 1]] (N, N)
x_grid = np.reshape(x_mesh.flatten(), (-1, 1)) # [[0] [0.5] [1] [0] [0.5] [1] [0] [0.5] [1]] # (N * N, 1)
y_grid = np.reshape(y_mesh.flatten(), (-1, 1)) # [[0] [0] [0] [0.5] [0.5] [0.5] [1] [1] [1]] # (N * N, 1)
return x_mesh, y_mesh, x_grid, y_grid # net_all: X = tf.concat([x,y],1)
if __name__ == "__main__":
# u_xx + u_yy = 0, x in [0, 1], y in [0, 1]
# u(0, y) = -y^2 ## left (u)
# u(1, y) = 1 - y^2 + 3y ## right (u)
# u(x, 0) = x^2 ## bottom (u)
# u(x, 1) = x^2 - 1 + 3x ## top (u)
# u_x(0, y) = 2x + 3y = 3y ## left (du/dx)
# u_x(1, y) = 2x + 3y = 2 + 3y ## right (du/dx)
# u_y(x, 0) = -2y + 3x = 3x ## bottom (du/dy)
# u_y(x, 1) = -2y + 3x = -2 + 3x ## top (du/dy)
# analytical solution: u(x, y) = x^2 - y^2 + 3xy
# NOTE: du/dn (normal direction) for boundary condition:
# 1) additional information
# 2) makes sense this way: u=temperature, fixed boundary temperatue, du/dn indicates influx/outflux
# NOTE: need at least one edge to be u(x, y), otherwise solution have arbitrary constant
# NOTE: Boundary condition can have order > 1
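    # Optional sanity check (a small sketch, not part of the training pipeline):
    # the analytical solution u = x^2 - y^2 + 3xy should have a vanishing
    # discrete Laplacian, since a 2nd-order finite-difference stencil is exact
    # for quadratics. The 33x33 grid size here is arbitrary.
    _xs, _ys = np.meshgrid(np.linspace(0, 1, 33), np.linspace(0, 1, 33))
    _u = _xs**2 - _ys**2 + 3 * _xs * _ys
    _h = _xs[0, 1] - _xs[0, 0]
    _lap = (_u[1:-1, 2:] + _u[1:-1, :-2] + _u[2:, 1:-1] + _u[:-2, 1:-1]
            - 4 * _u[1:-1, 1:-1]) / _h**2
    assert np.abs(_lap).max() < 1e-8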
###########################
## PART 1: setting parameters and getting accurate data for evaluation
# 4-layer deep NN with 20 neurons/layer & hyperbolic tangent act. func.
layers = [2, 50, 50, 50, 1]
mix = True # mix of boundary conditions (u, u_x, u_y)
# Domain bounds
lowerbound = np.array([0, 0])
upperbound = np.array([1, 1])
###########################
## PART 2:setting training and testing data from full analytical solution for uniform grid
# boundary condition
N_b = 20
xb = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_b), (-1, 1)) # [[0] [0.5] [1]] (N_b, 1)
yb = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_b), (-1, 1))
x0 = 0 * yb + lowerbound[0] # left edge # [[0] [0] [0]]
xe = 0 * yb + upperbound[0] # right edge
y0 = 0 * xb + lowerbound[1] # bottom edge
ye = 0 * xb + upperbound[1] # top edge
ul = -1 * yb**2 # u(0, y)
ur = 1 - yb**2 + 3 * yb # u(1, y)
ub = xb**2 # u(x, 0)
ut = xb**2 - 1 + 3 * xb # u(x, 1)
ul_x = 3 * yb # u_x(0, y)
ur_x = 2 + 3 * yb # u_x(1, y)
ub_y = 3 * xb # u_y(x, 0)
ut_y = -2 + 3 * xb # u_y(x, 1)
# collocation points for enforcing f=0 from uniform grid
# NOTE: want PDE satisfied at positions arbitrarily close to boundary -> include boundary points in collocation points
# NOTE: Generally, want interval of training point < smallest characteristic of solution (fluctuation) (dense enough to capture all landscape within domain)
# NOTE: To estimate the density: can estimate fluctuation frequency from f (known), geometry (sharp region higher frequency), prior knowledge
N_f = 30 # along one axis
xf = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_f), (-1, 1)) # (N_f, 1)
yf = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_f), (-1, 1)) # (N_f, 1)
_, _, xf_grid, yf_grid = axisToGrid(xf, yf) # (N_f * N_f, 1)
# testing data
N_test = 50 # NOTE: different from collocation points
xtest = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_test), (-1, 1)) # (N_test, 1)
ytest = np.reshape(np.linspace(lowerbound[1], upperbound[1], N_test), (-1, 1)) # (N_test, 1)
xtest_mesh, ytest_mesh, xtest_grid, ytest_grid = axisToGrid(xtest, ytest) # # (N_test, N_test), (N_test * N_test, 1)
u_test = xtest_grid**2 - ytest_grid**2 + 3 * xtest_grid * ytest_grid # (N_test * N_test, 1)
###########################
## PART 3: forming the network, training, predicting
model = PhysicsInformedNN(xb, yb, x0, xe, y0, ye, [ul, ur, ub, ut], [ul_x, ur_x, ub_y, ut_y], xf_grid, yf_grid, layers, lowerbound, upperbound, mix)
start_time = time.time()
# settings for plots
dirpath = f'./main/2d/forward_2d_figures/{start_time}' # where figures are stored
os.mkdir(dirpath)
ticksize = 8.5
plt.rcParams['xtick.labelsize'] = ticksize
plt.rcParams['ytick.labelsize'] = ticksize
plt.rcParams['axes.labelsize'] = 9.5
plt.rcParams['axes.titlesize'] = 10.5
plt.rcParams['lines.markersize'] = 4
plt.rcParams['legend.handlelength'] = 0.4
annotatesize = 9.5
# Plot 1. Boundary Point, Collocation Point
fig = plt.figure(figsize=(4.2, 2.9))
bc, = plt.plot(np.concatenate((x0,xe,xb,xb)), np.concatenate((yb,yb,y0,ye)), 'H', c='#ffa96b', label = 'Boundary Point', clip_on=False)
cp, = plt.plot(xf_grid, yf_grid, '.', c='#81c9fc', label = 'Collocation Point', clip_on=False)
plt.gca().set(xlim=(0, 1), ylim=(0, 1), xlabel='x', ylabel='y', title='Training Data')
plt.figlegend(handles=[bc, cp], loc='center right', bbox_to_anchor=(0.5, 0., 0.5, 0.5), fontsize=ticksize, framealpha=0.9)
fig.subplots_adjust(left=0.11, right=0.67, bottom=0.13, top=0.92)
plt.savefig(f'{dirpath}/trainingdata.pdf')
plt.close(fig)
dataDict = {
'boundary points':{
'N_b': N_b,
'xb': xb.tolist(),
'yb': yb.tolist(),
'x0': x0.tolist(),
'xe': xe.tolist(),
'y0': y0.tolist(),
'ye': ye.tolist(),
'ul': ul.tolist(),
'ur': ur.tolist(),
'ub': ub.tolist(),
'ut': ut.tolist(),
'ul_x': ul_x.tolist(),
'ur_x': ur_x.tolist(),
'ub_y': ub_y.tolist(),
            'ut_y': ut_y.tolist(),
},
'collocation points':{
'N_f': N_f,
'xf_grid': xf_grid.tolist(),
'yf_grid': yf_grid.tolist(),
},
'testing data':{
"N_test": N_test,
"xtest_mesh": xtest_mesh.tolist(),
"ytest_mesh": ytest_mesh.tolist(),
"u_test": u_test.tolist()
}
}
with open(f'{dirpath}/data.json', 'w') as f:
json.dump(dataDict, f)
# Note: loss around 10^-3/-4 should be about good
loss_values, u_preds, f_preds = ([] for i in range(3))
N_iter = 10000
loss_value_step = 10
pred_step = 100
contour_step = 1000 # if not pred_step's multiple, graph every least common multiple (pred_step, contour_step)
for i in range(N_iter):
lr = 10**-3 * 2**(-i/10000) if i <= 60000 else 10**-3 * 2**(-60000/10000) # 0.00002210/0.00001563 # learning rate decay
loss_value = model.train(lr) # from last iteration
        if (i+1) % loss_value_step == 0: # logs at i=9, 19, ..., 9999 (last iter)
loss_values.append(float(loss_value))
print('Iter: %d, Loss: %.3e, Time: %.2f, Learning Rate: %.8f' % (i+1, loss_value, time.time() - start_time, lr))
        if (i+1) % pred_step == 0: # predicts at i=99, 199, ..., 9999 (last iter)
u_pred, f_pred = model.predict(xtest_grid, ytest_grid)
u_preds.append(u_pred) # (N_test * N_test, 1)
f_preds.append(f_pred) # (N_test * N_test, 1)
        if (i+1) % contour_step == 0: # plots at i=999, 1999, ..., 9999 (last iter)
            ## Plot 2. u (Exact, Prediction) vs (x,y) and |u_pred-u_test| vs (x,y): contour
contourPlot(xtest_mesh, ytest_mesh, u_test, u_pred, N_test, i)
training_time = time.time() - start_time
u_preds = np.array(u_preds)
f_preds = np.array(f_preds)
u_pred, f_pred = model.predict(xtest_grid, ytest_grid)
# NOTE: what is important is the function u_pred resembles, not so much the parameters (weights & biases)
# NOTE: if no analytical solution, find numerical method/other method to verify -> directly use network
###########################
## PART 4: calculating errors
error_u = np.linalg.norm(u_pred - u_test, 2) / np.linalg.norm(u_test, 2) # scalar
print('Error u: %e' % (error_u))
###########################
## PART 5: Plotting
# Plot 3. loss vs. iteration
fig = plt.figure(figsize=(6.8, 6))
plt.ticklabel_format(axis='x', style="sci", scilimits=(3,3))
x_coords = loss_value_step * (np.array(range(len(loss_values))) + 1)
plt.semilogy(x_coords, loss_values) # linear X axis, logarithmic y axis(log scaling on the y axis)
plt.gca().set(xlabel='Iteration', ylabel='Loss', title='Loss during Training')
init_tuple = (loss_value_step, loss_values[0])
plt.annotate('(%d, %.3e)' % init_tuple, xy=init_tuple, fontsize=annotatesize, ha='left')
last_tuple = (N_iter, loss_values[-1])
plt.annotate('(%d, %.3e)' % last_tuple, xy=last_tuple, fontsize=annotatesize, ha='right', va='top')
plt.plot([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]], '.', c='#3B75AF')
fig.subplots_adjust(left=0.1, right=0.98, bottom=0.07, top=0.95)
    # NOTE: Oscillation: actually very small numerical difference because of small y scale
# 1. overshoot (fixed -> decaying learning rate)
# 2. Adam: gradient descent + momentum (sometime parameter change makes the loss go up)
plt.savefig(f'{dirpath}/forward_2d_loss.pdf')
plt.close(fig)
with open(f'{dirpath}/forward_2d_loss.json', 'w') as f:
json.dump({"x_coords": x_coords.tolist(), "loss_values": loss_values}, f)
# Plot 4. MSE between u_pred and u_test vs. iteration
fig = plt.figure(figsize=(6, 6))
plt.ticklabel_format(axis='x', style="sci", scilimits=(3,3))
x_coords = pred_step * (np.array(range(len(u_preds))) + 1)
u_mses = [((u_pred - u_test)**2).mean(axis=0) for u_pred in u_preds] #[[mse1] [mse2] [mse3]]
    u_mses = np.array(u_mses)
'''
Name: color_segmentation.py
Version: 1.0
Summary: K-means color clustering based segmentation. This is achieved
by converting the source image to a desired color space and
running K-means clustering on only the desired channels,
with the pixels being grouped into a desired number
of clusters.
Author: <NAME>
Author-email: <EMAIL>
Created: 2018-05-29
USAGE:
python3 demo_color_seg.py -p ~/plant-image-analysis/test/ -ft JPG
'''
# import the necessary packages
import os
import glob
import argparse
from sklearn.cluster import KMeans
from skimage.feature import peak_local_max
from skimage.morphology import watershed, medial_axis
from skimage import img_as_float, img_as_ubyte, img_as_bool, img_as_int
from skimage import measure
from skimage.segmentation import clear_border
from scipy.spatial import distance as dist
from scipy import optimize
from scipy import ndimage
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import warnings
warnings.filterwarnings("ignore")
import concurrent.futures
import multiprocessing
from multiprocessing import Pool
from contextlib import closing
MBFACTOR = float(1<<20)
# generate folder to store the output results
def mkdir(path):
    # remove whitespace at the beginning and any trailing backslash
    path = path.strip()
    path = path.rstrip("\\")
    # check whether the path already exists
    isExists = os.path.exists(path)
    if not isExists:
        # construct the path and folder
        os.makedirs(path)
        return True
    else:
        # if it exists, nothing to do
        return False
def color_cluster_seg(image, args_colorspace, args_channels, args_num_clusters, min_size):
# Change image color space, if necessary.
colorSpace = args_colorspace.lower()
if colorSpace == 'hsv':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif colorSpace == 'ycrcb' or colorSpace == 'ycc':
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
elif colorSpace == 'lab':
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
else:
colorSpace = 'bgr' # set for file naming purposes
# Keep only the selected channels for K-means clustering.
if args_channels != 'all':
channels = cv2.split(image)
channelIndices = []
for char in args_channels:
channelIndices.append(int(char))
image = image[:,:,channelIndices]
if len(image.shape) == 2:
            image = image.reshape(image.shape[0], image.shape[1], 1)
(width, height, n_channel) = image.shape
#print("image shape: \n")
#print(width, height, n_channel)
# Flatten the 2D image array into an MxN feature vector, where M is the number of pixels and N is the dimension (number of channels).
reshaped = image.reshape(image.shape[0] * image.shape[1], image.shape[2])
# Perform K-means clustering.
if args_num_clusters < 2:
print('Warning: num-clusters < 2 invalid. Using num-clusters = 2')
#define number of cluster
numClusters = max(2, args_num_clusters)
# clustering method
kmeans = KMeans(n_clusters = numClusters, n_init = 40, max_iter = 500).fit(reshaped)
# get lables
pred_label = kmeans.labels_
# Reshape result back into a 2D array, where each element represents the corresponding pixel's cluster index (0 to K - 1).
clustering = np.reshape(np.array(pred_label, dtype=np.uint8), (image.shape[0], image.shape[1]))
# Sort the cluster labels in order of the frequency with which they occur.
sortedLabels = sorted([n for n in range(numClusters)],key = lambda x: -np.sum(clustering == x))
# Initialize K-means grayscale image; set pixel colors based on clustering.
kmeansImage = np.zeros(image.shape[:2], dtype=np.uint8)
for i, label in enumerate(sortedLabels):
kmeansImage[clustering == label] = int(255 / (numClusters - 1)) * i
ret, thresh = cv2.threshold(kmeansImage,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)
thresh_cleaned = clear_border(thresh)
if np.count_nonzero(thresh) > 0:
thresh_cleaned_bw = clear_border(thresh)
else:
thresh_cleaned_bw = thresh
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(thresh_cleaned, connectivity = 8)
# stats[0], centroids[0] are for the background label. ignore
# cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT
sizes = stats[1:, cv2.CC_STAT_AREA]
Coord_left = stats[1:, cv2.CC_STAT_LEFT]
Coord_top = stats[1:, cv2.CC_STAT_TOP]
Coord_width = stats[1:, cv2.CC_STAT_WIDTH]
Coord_height = stats[1:, cv2.CC_STAT_HEIGHT]
Coord_centroids = centroids
#print("Coord_centroids {}\n".format(centroids[1][1]))
#print("[width, height] {} {}\n".format(width, height))
nb_components = nb_components - 1
#min_size = 70
max_size = width*height*0.1
    img_thresh = np.zeros([width, height], dtype=np.uint8)
#!/usr/bin/env python3
from pprint import pformat
from threading import Event
from celery import Celery, group, chain
from celery.exceptions import SoftTimeLimitExceeded
from tools import (sanity_check, calc_indiv, calc_comb, update_cube_cache)
from utils.api import api_request, get_api_creds
from utils.order import cancel_order, place_order, target_orders
from utils.reconcile import reconcile_balances, reconcile_order
from utils.regression import regression
from database import *
import numpy as np
# Replacing datetime.time (Do not move)
from time import time, sleep
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
celery = Celery('trader', backend=CELERY_RESULT_BACKEND, broker=CELERY_BROKER_URL)
celery.conf.broker_transport_options = {'fanout_prefix': True}
celery.conf.broker_transport_options = {'fanout_patterns': True}
celery.conf.worker_prefetch_multiplier = 1
celery.conf.task_time_limit = 1800
celery.conf.task_soft_time_limit = 12000
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
stopevent = Event()
class SqlAlchemyTask(celery.Task):
"""An abstract Celery Task that ensures that the connection the the
database is closed on task completion"""
abstract = True
def after_return(self, status, retval, task_id, args, kwargs, einfo):
db_session.close()
db_session.remove()
@celery.task(base=SqlAlchemyTask)
def place_orders(cube_id, orders):
try:
cube = Cube.query.get(cube_id)
log.debug(f'{cube} Placing Orders')
for order in orders:
# Arguments: cube_id, ex_pair_id, side, amount, price
place_order(cube_id, order[2], order[3], order[0], order[1])
update_cube_cache(cube_id, False)
except SoftTimeLimitExceeded:
update_cube_cache(cube_id, False)
## To do: error handling
@celery.task(base=SqlAlchemyTask)
def cancel_orders(cube_id, ex, orders):
try:
cube = Cube.query.get(cube_id)
log.debug(f'{cube} Canceling Orders')
for order in orders:
cancel_order(cube_id,
ex.id,
order['order_id'],
order['base'],
order['quote']
)
update_cube_cache(cube_id, False)
except SoftTimeLimitExceeded:
update_cube_cache(cube_id, False)
## To do: error handling
#### Not implemented yet
# @celery.task(base=SqlAlchemyTask)
# def unrecognized_activity(cube_id):
# try:
# # Possible theft of key and malicious trading activity
# # Delete all open orders and force update
# cube = Cube.query.get(cube_id)
# log.debug(f'{cube} Unrecognized Activity')
# for conn in cube.connections.values():
# ex = conn.exchange
# creds = get_api_creds(cube, ex)
# args = {**creds, **{'type': 'open'}}
# orders = api_request(cube, 'GET', ex.name, '/orders', args)
# if orders:
# cancel_orders.delay(cube_id, ex, orders)
# db_session.add(cube)
# db_session.commit()
# except SoftTimeLimitExceeded:
# update_cube_cache(cube_id, False)
# ## To do: error handling
@celery.task(base=SqlAlchemyTask)
def trigger_rebalance(cube_id):
try:
cube = Cube.query.get(cube_id)
log.debug(f'{cube} Rebalance triggered')
cube.reallocated_at = datetime.utcnow()
for b in cube.balances:
b.target = None
db_session.add(cube)
db_session.commit()
except SoftTimeLimitExceeded:
update_cube_cache(cube_id, False)
## To do: error handling
def get_start_timestamp(cube, db_tx):
if db_tx:
        log.debug(f'{cube} Transactions exist, updating from {db_tx.timestamp}')
return int(float(db_tx.timestamp)) + 1
else:
log.debug(f'{cube} Transactions do not exist, updating from account start')
return int(datetime.timestamp(cube.created_at) * 1000) # Convert to milliseconds
def import_trades(cube, ex, creds, since):
now = time() * 1000
days = 10
trades = pd.DataFrame()
url = '/trades'
if ex.name in ['Binance', 'Liquid']:
while since < now:
new_trades = pd.DataFrame()
for bal in cube.balances:
ex_pairs = ExPair.query.filter_by(
exchange_id=ex.id, active=True
).filter(or_(
ExPair.base_currency_id == bal.currency_id,
ExPair.quote_currency_id == bal.currency_id,
)).all()
for ex_pair in ex_pairs:
args = {**creds,
**{
'base': ex_pair.base_symbol,
'quote': ex_pair.quote_symbol,
'limit': 1000,
'since': since
}
}
binance_trades = api_request(cube, 'GET', ex.name, url, args)
binance_trades = pd.read_json(binance_trades)
new_trades = new_trades.append(binance_trades)
sleep(1)
if not new_trades.empty:
new_trades = new_trades.sort_index()
new_trades.timestamp = new_trades.timestamp.astype(np.int64)//10**6
since = int(new_trades.iloc[-1].timestamp) + 1
trades = trades.append(new_trades)
elif since < now:
# 10 days in milliseconds
since = since + 24 * 60 * 60 * days * 1000
else:
break
else:
while since < now:
args = {**creds, **{'since': since}}
new_trades = api_request(cube, 'GET', ex.name, url, args)
new_trades = pd.read_json(new_trades)
if not new_trades.empty:
new_trades.timestamp = new_trades.timestamp.astype(np.int64)//10**6
since = new_trades.iloc[-1].timestamp + 1
trades = trades.append(new_trades)
elif since < now:
# 10 days in milliseconds
since = since + 24 * 60 * 60 * days * 1000
else:
break
if not trades.empty:
# Adjustments to dataframe to match table structure
fee = trades['fee'].apply(pd.Series)
try:
fee = fee.drop(['type'], axis=1)
except:
pass
try:
fee = fee.rename(index=str, columns={'rate': 'fee_rate', 'cost': 'fee_amount', 'currency': 'fee_currency'})
except:
pass
trades = pd.concat([trades, fee], axis=1)
trades = trades.rename(index=str, columns={'id': 'tx_id', 'order': 'order_id', 'amount': 'base_amount', 'cost': 'quote_amount'})
symbol = trades['symbol'].str.split('/', n=1, expand=True)
trades['base_symbol'] = symbol[0]
trades['quote_symbol'] = symbol[1]
trades['trade_type'] = trades['type']
trades['type'] = trades['side']
trades.drop(['side', 'symbol', 'fee'], axis=1, inplace=True)
        _, i = np.unique(trades.columns, return_index=True)
"""
Set of programs to read and interact with output from Bifrost
"""
# import builtin modules
import os
import functools
import weakref
from glob import glob
import warnings
import time
import ast
# import external public modules
import numpy as np
from scipy import interpolate
from scipy.ndimage import map_coordinates
# import internal modules
from .load_quantities import *
from .load_arithmetic_quantities import *
from . import load_fromfile_quantities
from .tools import *
from . import document_vars
from . import file_memory
whsp = ' '
class BifrostData(object):
"""
Reads data from Bifrost simulations in native format.
Parameters
----------
file_root - string
Basename for all file names (without underscore!). Snapshot number
will be added afterwards, and directory will be added before.
snap - integer, optional
Snapshot number. If None, will read first snapshot in sequence.
meshfile - string, optional
File name (including full path) for file with mesh. If set
to None (default), a uniform mesh will be created.
fdir - string, optional
Directory where simulation files are. Must be a real path.
verbose - bool, optional
If True, will print out more diagnostic messages
dtype - string, optional
Data type for reading variables. Default is 32 bit float.
big_endian - string, optional
If True, will read variables in big endian. Default is False
(reading in little endian).
ghost_analyse - bool, optional
If True, will read data from ghost zones when this is saved
to files. Default is never to read ghost zones.
cstagop - bool, optional
Use true only if data is too big to load. Danger:
it will do quantity operations without correcting the stagger mesh.
lowbus - bool, optional
Use True only if data is too big to load. It will do cstagger
operations layer by layer using threads (slower).
numThreads - integer, optional
number of threads for certain operations that use parallelism.
fast - whether to read data "fast", by only reading the requested data.
implemented as a flag, with False as default, for backwards
compatibility; some previous codes may have assumed non-requested
data was read. To avoid issues, just ensure you use get_var()
every time you want to have data, and don't assume things exist
(e.g. self.bx) unless you do get_var for that thing
(e.g. get_var('bx')).
Examples
--------
This reads snapshot 383 from simulation "cb24bih", whose file
root is "cb24bih", and is found at directory /data/cb24bih:
>>> a = BifrostData("cb24bih", snap=383, fdir="/data/cb24bih")
Scalar variables do not need de-staggering and are available as
memory map (only loaded to memory when needed), e.g.:
>>> a.r.shape
(504, 504, 496)
Composite variables need to be obtained by get_var():
>>> vx = a.get_var("ux")
"""
snap = None
def __init__(self, file_root, snap=None, meshfile=None, fdir='.',
fast=False, verbose=True, dtype='f4', big_endian=False,
cstagop=True, ghost_analyse=False, lowbus=False,
numThreads=1, params_only=False, sel_units=None,
use_relpath=False, stagger_kind = 'stagger',
iix=None, iiy=None, iiz=None):
"""
Loads metadata and initialises variables.
"""
self.fdir = fdir if use_relpath else os.path.abspath(fdir)
self.verbose = verbose
self.cstagop = cstagop
self.lowbus = lowbus
self.numThreads = numThreads
self.file_root = os.path.join(self.fdir, file_root)
self.root_name = file_root
self.meshfile = meshfile
self.ghost_analyse = ghost_analyse
self.stagger_kind = stagger_kind
self.sel_units = sel_units
self.fast = fast
self._fast_skip_flag = False if fast else None # None-> never skip
setattr(self, document_vars.LOADING_LEVEL, -1) # tells how deep we are into loading a quantity now.
# endianness and data type
if big_endian:
self.dtype = '>' + dtype
else:
self.dtype = '<' + dtype
self.hion = False
self.heion = False
try:
tmp = find_first_match("%s*idl" % file_root, fdir)
except IndexError:
try:
tmp = find_first_match("%s*idl.scr" % file_root, fdir)
except IndexError:
try:
tmp = find_first_match("mhd.in", fdir)
except IndexError:
raise ValueError(("(EEE) init: no .idl or mhd.in files "
"found"))
self.uni = Bifrost_units(filename=tmp, fdir=fdir)
self.set_snap(snap, True, params_only=params_only)
self.set_domain_iiaxes(iix=iix, iiy=iiy, iiz=iiz, internal=False)
self.genvar()
self.transunits = False
self.cross_sect = cross_sect_for_obj(self)
if 'tabinputfile' in self.params.keys():
tabfile = os.path.join(self.fdir, self.get_param('tabinputfile').strip())
if os.access(tabfile, os.R_OK):
self.rhoee = Rhoeetab(tabfile=tabfile, fdir=fdir, radtab=True)
document_vars.create_vardict(self)
document_vars.set_vardocs(self)
def _set_snapvars(self, firstime=False):
"""
Sets list of avaible variables
"""
self.snapvars = ['r', 'px', 'py', 'pz', 'e']
self.auxvars = self.get_param('aux', error_prop=True).split()
if self.do_mhd:
self.snapvars += ['bx', 'by', 'bz']
self.hionvars = []
self.heliumvars = []
if self.get_param('do_hion', default=0) > 0:
self.hionvars = ['hionne', 'hiontg', 'n1',
'n2', 'n3', 'n4', 'n5', 'n6', 'nh2']
self.hion = True
if self.get_param('do_helium', default=0) > 0:
self.heliumvars = ['nhe1', 'nhe2', 'nhe3']
self.heion = True
self.compvars = ['ux', 'uy', 'uz', 's', 'ee']
self.simple_vars = self.snapvars + self.auxvars + self.hionvars + \
self.heliumvars
self.auxxyvars = []
# special case for the ixy1 variable, lives in a separate file
if 'ixy1' in self.auxvars:
self.auxvars.remove('ixy1')
self.auxxyvars.append('ixy1')
self.vars2d = []
# special case for 2D variables, stored in a separate file
for var in self.auxvars:
if any(i in var for i in ('xy', 'yz', 'xz')):
self.auxvars.remove(var)
self.vars2d.append(var)
def set_snap(self, snap, firstime=False, params_only=False):
"""
Reads metadata and sets variable memmap links for a given snapshot
number.
Parameters
----------
snap - integer or array
Number of simulation snapshot to load.
"""
if snap is None:
try:
tmp = sorted(glob("%s*idl" % self.file_root))[0]
snap_string = tmp.split(
self.file_root + '_')[-1].split(".idl")[0]
if snap_string.isdigit():
snap = int(snap_string)
else:
tmp = glob("%s.idl" % self.file_root)
snap = 0
except Exception:
try:
tmp = sorted(glob("%s*idl.scr" % self.file_root))[0]
snap = -1
except IndexError:
try:
tmp = glob("%s.idl" % self.file_root)
snap = 0
except IndexError:
raise ValueError(("(EEE) set_snap: snapshot not defined "
"and no .idl files found"))
def snap_str_from_snap(snap):
if snap == 0:
return ''
else:
return '_%03i' % snap
self.snap = snap
if np.shape(self.snap) != ():
self.snap_str = []
for num in snap:
self.snap_str.append(snap_str_from_snap(num))
else:
self.snap_str = snap_str_from_snap(snap)
self.snapInd = 0
self._read_params(firstime=firstime)
# Read mesh for all snaps because meshfiles could differ
self.__read_mesh(self.meshfile, firstime=firstime)
# variables: lists and initialisation
self._set_snapvars(firstime=firstime)
# Do not call if params_only requested
if(not params_only):
self._init_vars(firstime=firstime)
def _read_params(self, firstime=False):
"""
Reads parameter file (.idl)
"""
if np.shape(self.snap) == ():
snap = [self.snap]
snap_str = [self.snap_str]
else:
snap = self.snap
snap_str = self.snap_str
filename = []
self.paramList = []
for i, num in enumerate(snap):
if num < 0:
filename.append(self.file_root + '.idl.scr')
elif num == 0:
filename.append(self.file_root + '.idl')
else:
filename.append(self.file_root + snap_str[i] + '.idl')
for file in filename:
self.paramList.append(read_idl_ascii(file,firstime=firstime, obj=self))
# assign some parameters as attributes
for params in self.paramList:
for p in ['x', 'y', 'z', 'b']:
try:
setattr(self, 'n' + p, params['m' + p])
except KeyError:
raise KeyError(('read_params: could not find '
'm%s in idl file!' % p))
for p in ['dx', 'dy', 'dz', 'do_mhd']:
try:
setattr(self, p, params[p])
except KeyError:
raise KeyError(('read_params: could not find '
'%s in idl file!' % p))
try:
if params['boundarychk'] == 1:
self.nzb = self.nz + 2 * self.nb
else:
self.nzb = self.nz
except KeyError:
self.nzb = self.nz
# check if units are there, if not use defaults and print warning
unit_def = {'u_l': 1.e8, 'u_t': 1.e2, 'u_r': 1.e-7,
'u_b': 1.121e3, 'u_ee': 1.e12}
for unit in unit_def:
if unit not in params:
default = unit_def[unit]
if hasattr(self, 'uni'):
default = getattr(self.uni, unit, default)
if getattr(self, 'verbose', True):
print("(WWW) read_params:"" %s not found, using "
"default of %.3e" % (unit, default), 2*whsp,
end="\r", flush=True)
params[unit] = default
self.params = {}
for key in self.paramList[0]:
self.params[key] = np.array(
[self.paramList[i][key] for i in range(0, len(self.paramList)) \
if key in self.paramList[i].keys()])
# the if statement is required in case extra params in
# self.ParmList[0]
self.time = self.params['t']
if self.sel_units=='cgs':
self.time *= self.uni.uni['t']
def get_param(self, param, default=None, warning=None, error_prop=None):
''' get param via self.params[param][self.snapInd].
if param not in self.params.keys(), then the following kwargs may play a role:
default: None (default) or any value.
return this value (eventually) instead. (check warning and error_prop first.)
warning: None (default) or any Warning or string.
if not None, do warnings.warn(warning).
error_prop: None (default), True, or any Exception object.
None --> ignore this kwarg.
True --> raise the original KeyError caused by trying to get self.params[param].
else --> raise error_prop from None.
'''
try:
p = self.params[param]
except KeyError as err_triggered:
if (warning is not None) and (self.verbose):
warnings.warn(warning)
if error_prop is not None:
if isinstance(error_prop, BaseException):
raise error_prop from None # "from None" --> show just this error, not also err_triggered
elif error_prop:
raise err_triggered
return default
else:
p = p[self.snapInd]
return p
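    # Illustrative calls of get_param (the parameter names below are just
    # examples and may not exist in every run):
    #   dx = self.get_param('dx', error_prop=True)          # raise if missing
    #   gamma = self.get_param('gamma', default=5.0/3.0,
    #                          warning='gamma not found, using 5/3')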
def __read_mesh(self, meshfile, firstime=False):
"""
Reads mesh file
"""
if meshfile is None:
meshfile = os.path.join(
self.fdir, self.get_param('meshfile', error_prop=True).strip())
if os.path.isfile(meshfile):
f = open(meshfile, 'r')
for p in ['x', 'y', 'z']:
dim = int(f.readline().strip('\n').strip())
assert dim == getattr(self, 'n' + p)
# quantity
setattr(self, p, np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# quantity "down"
setattr(self, p + 'dn', np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# up derivative of quantity
setattr(self, 'd%sid%sup' % (p, p), np.array(
[float(v) for v in f.readline().strip('\n').split()]))
# down derivative of quantity
setattr(self, 'd%sid%sdn' % (p, p), np.array(
[float(v) for v in f.readline().strip('\n').split()]))
f.close()
if self.ghost_analyse:
# extend mesh to cover ghost zones
self.z = np.concatenate((self.z[0] - np.linspace(
self.dz * self.nb, self.dz, self.nb),
self.z, self.z[-1] + np.linspace(
self.dz, self.dz * self.nb, self.nb)))
self.zdn = np.concatenate((self.zdn[0] - np.linspace(
self.dz * self.nb, self.dz, self.nb),
self.zdn, (self.zdn[-1] + np.linspace(
self.dz, self.dz * self.nb, self.nb))))
self.dzidzup = np.concatenate((
                    np.repeat(self.dzidzup[0], self.nb),
                    self.dzidzup,
                    np.repeat(self.dzidzup[-1], self.nb)))
# -*- coding: utf-8 -*-
"""
Current functionalities:
- Lifting line theory
 - generate field pressures for Abaqus or other software
- air properties calculator
- Reynolds calculator
Created on Mon Jul 20 17:26:19 2015
@author: <NAME>
"""
from __future__ import print_function
from __future__ import absolute_import
import math
import numpy as np
#class Wing():
# def __init__(self, alpha_L_0_root, c_D_xfoil, N=10, b=10., taper=1.,
# chord_root=1, alpha_root=0., V=1.):
# self.alpha_L_0_root = alpha_L_0_root
# self.c_D_xfoil = c_D_xfoil
# self.N = N
# self.b = b
# self.taper = taper
# self.chord_root = chord_root
# self.alpha_root = alpha_root
# self.V = V
#==============================================================================
# Functions that calculate aerodynamic properties based on already calcualted
# aerodynamic properties from other modules
#==============================================================================
def LLT_calculator(alpha_L_0_root, c_D_xfoil, N=10, b=10., taper=1.,
chord_root=1, alpha_root=0., V=1.):
"""
Calculate the coefficients for a Wing.
TODO : - Include elliptical wing
- When alpha_L_0_root = zero, nan!
- Include non rectangular wings
- something else?
"""
def x_theta_converter(input, b, Output='x'):
"""Converts cartesian coordinate in a polar coordinate."""
if Output == 'x':
            output = -(b/2) * np.cos(input)
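            # Illustration of the lifting-line change of variable:
            # theta = 0 maps to x = -b/2 (one tip), theta = pi/2 to x = 0
            # (the root) and theta = pi to x = +b/2 (the other tip).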
"""Tests for optional shape and type annotation features."""
import jax
import numpy as onp
import pytest
from jax import numpy as jnp
from typing_extensions import Annotated
from jax_dataclasses import EnforcedAnnotationsMixin, pytree_dataclass
@pytree_dataclass
class MnistStruct(EnforcedAnnotationsMixin):
image: Annotated[
jnp.ndarray,
(28, 28),
jnp.floating,
]
label: Annotated[
jnp.ndarray,
(10,),
jnp.integer,
]
@pytree_dataclass
class MnistStructPartial(EnforcedAnnotationsMixin):
image_shape_only: Annotated[
jnp.ndarray,
(28, 28),
]
label_dtype_only: Annotated[
jnp.ndarray,
jnp.integer,
]
def test_valid():
data = MnistStruct(
image=onp.zeros((28, 28), dtype=onp.float32),
label=onp.zeros((10,), dtype=onp.uint8),
)
assert data.get_batch_axes() == ()
data = MnistStruct(
image=onp.zeros((5, 28, 28), dtype=onp.float32),
label=onp.zeros((5, 10), dtype=onp.uint8),
)
assert data.get_batch_axes() == (5,)
data = MnistStruct(
image=onp.zeros((5, 7, 28, 28), dtype=onp.float32),
label=onp.zeros((5, 7, 10), dtype=onp.uint8),
)
assert data.get_batch_axes() == (5, 7)
data = MnistStructPartial(
image_shape_only=onp.zeros((7, 28, 28), dtype=onp.float32),
label_dtype_only=onp.zeros((70), dtype=onp.int32),
)
assert data.get_batch_axes() == (7,)
def test_shape_mismatch():
with pytest.raises(AssertionError):
MnistStruct(
            image=onp.zeros((7, 32, 32), dtype=onp.float32),
            label=onp.zeros((7, 10), dtype=onp.uint8),
        )
import numpy as np
from tqdm import tqdm
from scipy.stats import rankdata
from random import choice
from collections import defaultdict
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.losses import binary_crossentropy
import tensorflow as tf
from random import choices
EPSILON = 1e-6
from rdflib import Graph, URIRef, Literal, Namespace
import rdflib
from rdflib.namespace import XSD, RDF
UNIT = Namespace('http://qudt.org/vocab/unit#')
from tqdm import tqdm
import spacy
VEC_SIZE = 300
def isint(value):
try:
int(value)
return True
except ValueError:
return False
class LiteralConverter:
def __init__(self,g,padding_value=0):
self.g = g
self.non_literal_entities = set(g.subjects()) | set([o for o in g.objects() if isinstance(o,URIRef)])
self.literal_predicates = set([p for p,o in g.predicate_objects() if isinstance(o,Literal)])
self.padding_value = padding_value
self.lang_models = {'xx':spacy.load('xx_ent_wiki_sm'),'en':spacy.load('en_core_web_md')}
def _process_string_literal(self,x):
doc = self.lang_models['en'](str(x))
v = doc.vector
if len(v) < 1:
v = self.padding_value*np.ones((VEC_SIZE,))
return v
def _process_literal(self,x):
if hasattr(x,'datatype') and (x.datatype == XSD['float'] or x.datatype == XSD['double']):
return [float(x)]
if hasattr(x,'datatype') and x.datatype == XSD['date']:
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and x.datatype == XSD['boolean']:
return [1] if bool(x) else [0]
if len(str(x)) == 4 and isint(x):
return URIRef('http://examples.org/date/%s' % str(x))
if hasattr(x,'datatype') and (x.datatype is None or x.datatype == XSD['string']):
return self._process_string_literal(x)
return None
def fit(self):
out = defaultdict(dict)
vec_or_num = {}
array_ps = set()
for i,e in tqdm(enumerate(self.non_literal_entities),total=len(self.non_literal_entities),desc='Processing literals'):
for j,p in enumerate(self.literal_predicates):
tmp = set(self.g.objects(subject = e, predicate = p / RDF.value)) | set(self.g.objects(subject = e, predicate = p))
unit = set(self.g.objects(subject = e, predicate = p / UNIT.units))
for t in tmp:
t = self._process_literal(t)
if t is None:
continue
elif isinstance(t,URIRef):
self.g.add((e,p,t))
else:
out[p][e] = t
if p not in vec_or_num: vec_or_num[p] = len(t)
s=sum(i for k,i in vec_or_num.items())
self.literals = {}
for e in self.non_literal_entities:
tmp = []
for p in self.literal_predicates:
if not p in vec_or_num: continue
if e in out[p]:
tmp.append(np.asarray(out[p][e]).reshape((1,-1)))
else:
tmp.append(self.padding_value*np.ones((1,vec_or_num[p])))
tmp = np.concatenate(tmp,axis=1).reshape((-1,))
assert len(tmp) == s
self.literals[e] = tmp
def transform(self,entities):
return np.asarray([self.literals[e] for e in entities])
def fit_transform(self,entities):
if not hasattr(self,'literals'):
self.fit()
return self.transform(entities)
def load_kg(path):
out = []
with open(path,'r') as f:
for l in f:
l = l.strip().split()
out.append(l)
return out
def generate_negative(kg, N, negative=2, check_kg=False, corrupt_head=True, corrupt_tail=True):
# false triples:
assert corrupt_head or corrupt_tail
R = np.repeat(np.asarray([p for _,p,_ in kg]).reshape((-1,1)),negative,axis=0)
fs = np.random.randint(0,N,size=(negative*len(kg),1))
fo = np.random.randint(0,N,size=(negative*len(kg),1))
negative_kg = np.stack([fs,R,fo],axis=1)
return negative_kg
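# For illustration: with negative=2, a KG of 1000 (s, p, o) index triples yields
# a (2000, 3, 1)-shaped array of corrupted triples that keep the original
# predicates but use uniformly re-sampled subject and object ids
# (the check_kg / corrupt_head / corrupt_tail flags are not applied here).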
def oversample_data(kgs,x=None,y=None,testing=False):
if testing:
kgs = [list(kg)[:len(y)] for kg in kgs]
else:
kgs = [list(kg) for kg in kgs]
if y is not None:
m = max(max(map(len,kgs)),len(y))
else:
m = max(map(len,kgs))
out = []
for kg in kgs:
out.append(choices(kg, k=m))
if x is not None and y is not None:
        k = int(np.ceil(m/len(y)))
y = np.repeat(y,k,axis=0)[:m]
x = np.repeat(x,k,axis=0)[:m,:]
for s in np.split(x,3,axis=1):
out.append(s.reshape((-1,)))
return [np.squeeze(np.asarray(o)) for o in out], np.asarray(y)
else:
return [np.squeeze(np.asarray(o)) for o in out]
def pad(kg,bs):
kg = list(kg)
while len(kg) % bs != 0:
kg.append(choice(kg))
return np.asarray(kg)
def mrr(target, scores):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores]
return 1/(1+labels.index(target))
def hits(target, scores, k=10):
scores = sorted(scores, key=lambda x: x[1], reverse=True)
labels = [x for x,_ in scores][:k]
return int(target in labels)
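# Small illustration of the two ranking metrics above (hypothetical scores):
# the target entity 7 is ranked 2nd, so it contributes an MRR of 1/2 and is a
# hit for k >= 2 but not for k = 1.
# >>> scores = [(3, 0.9), (7, 0.8), (5, 0.1)]
# >>> mrr(7, scores)          # 0.5
# >>> hits(7, scores, k=1)    # 0
# >>> hits(7, scores, k=3)    # 1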
def gen_tail_data(test_data,num_entities,bs,filter_t):
for s,p,o in test_data:
candiate_objects = list(range(num_entities))
candiate_objects.remove(o)
for oi in filter_t[(s,p)]:
candiate_objects.remove(oi)
subjects = np.asarray([[int(s)]]*(len(candiate_objects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_objects)+1))
objects = np.asarray([[int(o)]] + [[ent_id] for ent_id in candiate_objects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
def gen_head_data(test_data,num_entities,bs,filter_h):
for s,p,o in test_data:
candiate_subjects = list(range(num_entities))
candiate_subjects.remove(s)
for si in filter_h[(p,o)]:
candiate_subjects.remove(si)
objects = np.asarray([[int(o)]]*(len(candiate_subjects)+1))
predicates = np.asarray([[int(p)]]*(len(candiate_subjects)+1))
subjects = np.asarray([[int(s)]] + [[ent_id] for ent_id in candiate_subjects])
triples = np.concatenate((subjects,predicates,objects),axis=-1)
yield triples.reshape((-1,3))
def validate(model, test_data, num_entities, bs, filtering_triples = None):
filter_h = defaultdict(set)
filter_t = defaultdict(set)
for s,p,o in filtering_triples:
filter_h[(p,o)].add(s)
filter_t[(s,p)].add(o)
c_1, c_3, c_10 = 0,0,0
mean_ranks = []
for t in tqdm(gen_tail_data(test_data,num_entities,bs,filter_t),total=len(test_data)):
res = np.asarray(model.predict(t)).reshape((-1,))
r = rankdata(res,'max')
target_rank = r[0]
num_candidate = len(res)
real_rank = num_candidate - target_rank + 1
c_1 += 1 if target_rank == num_candidate else 0
c_3 += 1 if target_rank + 3 > num_candidate else 0
c_10 += 1 if target_rank + 10 > num_candidate else 0
mean_ranks.append(real_rank)
tail_hit_at_1 = c_1 / float(len(test_data))
tail_hit_at_3 = c_3 / float(len(test_data))
tail_hit_at_10 = c_10 / float(len(test_data))
    tail_avg_rank = np.mean(mean_ranks)
import numpy as np
import pickle
import time
from random import uniform
import torch
from torchvision import datasets
import torchvision
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def prepro(x):
""" preprocessing for images only"""
    return x.astype(np.float64).ravel() / 255
def softmax(x):
exp_vals = np.exp(x - np.max(x, axis=1, keepdims=True))
probs = exp_vals / np.sum(exp_vals, axis=1, keepdims=True)
return probs
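# Quick illustration (not used below): the max-shift keeps np.exp from
# overflowing for large logits, and each row of the result sums to 1.
# >>> softmax(np.array([[1000.0, 1001.0, 1002.0]]))
# array([[0.09003057, 0.24472847, 0.66524096]])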
def relu(x):
return np.maximum(0, x)
def drelu(inputs, dinputs):
dinputs[inputs <= 0] = 0
return dinputs
def optimize(model, grad_buffer, lr):
""" Vanilla Stochastic Gradient Descent """
for k, v in model.items():
model[k] -= lr * grad_buffer[k]
def init_grad_buffer(model):
grad_buffer = {}
for k, v in model.items():
        grad_buffer[f'{k}'] = np.zeros_like(v)
    return grad_buffer
'''Module with classes and methods to analyse and process exported geomodel grids
Created on 21/03/2014
@author: <NAME> (some parts originally developed by <NAME>)
'''
import numpy as np
#import pynoddy
import subprocess
import os.path
import platform
try:
import matplotlib.pyplot as plt
except ImportError:
print("\n\n\tMatplotlib not installed - plotting functions will not work!\n\n\n")
# import mpl_toolkits
# from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# to convert python variable types to cpp types
import ctypes
# to create array
from numpy.ctypeslib import ndpointer
# to create folder
import os
# read out and change xml file (here only used to read out model boundary information)
import geomodeller_xml_obj as GO
class GeoGrid():
"""Object definition for exported geomodel grids"""
def __init__(self, **kwds):
"""GeoGrid contains methods to load, analyse, and process exported geomodel grids
**Optional Keywords**:
- *grid_filename* = string : filename of exported grid
- *delxyz_filename* = string : file with model discretisation
- *dimensions_filename* = string : file with model dimension (coordinates)
"""
        if 'grid_filename' in kwds:
            self.grid_filename = kwds['grid_filename']
        if 'delxyz_filename' in kwds:
            self.delxyz_filename = kwds['delxyz_filename']
        if 'dimensions_filename' in kwds:
            self.dimensions_filename = kwds['dimensions_filename']
def __add__(self, G_other):
"""Combine grid with another GeoGrid if regions are overlapping"""
# check overlap
print (self.ymin, self.ymax)
print (G_other.ymin, G_other.ymax)
if (G_other.ymin < self.ymax and G_other.ymin > self.ymin):
print("Grids overlapping in y-direction between %.0f and %.0f" %
(G_other.ymin, self.ymax))
def load_grid(self):
"""Load exported grid, discretisation and dimensions from file"""
if not hasattr(self, 'grid_filename'):
raise AttributeError("Grid filename is not defined!")
self.grid = np.loadtxt(self.grid_filename,
delimiter = ',',
dtype='int',
unpack=False)
if hasattr(self, 'delxyz_filename'):
self.load_delxyz(self.delxyz_filename)
self.adjust_gridshape()
if hasattr(self, 'dimensions_filename'):
self.load_dimensions(self.dimensions_filename)
def load_delxyz(self, delxyz_filename):
"""Load grid discretisation from file"""
del_lines = open(delxyz_filename, 'r').readlines()
d0 = del_lines[0].split("*")
self.delx = np.array([float(d0[1]) for _ in range(int(d0[0]))])
d1 = del_lines[1].split("*")
self.dely = np.array([float(d1[1]) for _ in range(int(d1[0]))])
d2 = del_lines[2].split(",")[:-1]
self.delz = np.array([float(d) for d in d2])
(self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
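    # Expected layout of a delxyz file (illustrative values): the x and y lines
    # use a "count*cellsize" shorthand, the z line lists every cell size with a
    # trailing comma, e.g.
    #   100*50.0
    #   80*50.0
    #   10.0,10.0,20.0,50.0,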
def set_delxyz(self, delxyz):
"""Set delx, dely, delz arrays explicitly and update additional attributes
**Arguments**:
- *delxyz* = (delx-array, dely-array, delz-array): arrays with cell dimensions
"""
self.delx, self.dely, self.delz = delxyz
(self.nx, self.ny, self.nz) = (len(self.delx), len(self.dely), len(self.delz))
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def set_basename(self, name):
"""Set basename for grid exports, etc.
**Arguments**:
- *name* = string: basename
"""
self.basename = name
def load_dimensions(self, dimensions_filename):
"""Load project dimensions from file"""
dim = [float(d) for d in open(dimensions_filename, 'r').readlines()[1].split(",")]
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = dim
# calculate cell centre positions in real world coordinates
def define_regular_grid(self, nx, ny, nz):
"""Define a regular grid from defined project boundaries and given discretisations"""
self.nx = nx
self.ny = ny
self.nz = nz
self.delx = np.ones(nx) * (self.xmax - self.xmin) / nx
self.dely = np.ones(ny) * (self.ymax - self.ymin) / ny
self.delz = np.ones(nz) * (self.zmax - self.zmin) / nz
# create (empty) grid object
self.grid = np.ndarray((nx, ny, nz))
# update model extent
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
def define_irregular_grid(self, delx, dely, delz):
"""Set irregular grid according to delimter arrays in each direction"""
self.delx = delx
self.dely = dely
self.delz = delz
self.nx = len(delx)
self.ny = len(dely)
self.nz = len(delz)
# create (empty) grid object
self.grid = np.ndarray((self.nx, self.ny, self.nz))
# update model extent
(self.extent_x, self.extent_y, self.extent_z) = (sum(self.delx), sum(self.dely), sum(self.delz))
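    # Illustrative setup (values are arbitrary): set the model extent first,
    # then discretise it, e.g.
    #   geo = GeoGrid()
    #   geo.xmin, geo.xmax = 0., 10000.
    #   geo.ymin, geo.ymax = 0., 10000.
    #   geo.zmin, geo.zmax = -5000., 0.
    #   geo.define_regular_grid(50, 50, 25)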
def get_dimensions_from_geomodeller_xml_project(self, xml_filename):
"""Get grid dimensions from Geomodeller project
**Arguments**:
- *xml_filename* = string: filename of Geomodeller XML file
"""
# Note: this implementation is based on the Geomodeller API
# The boundaries could theoretically also be extracted from the XML file
# directly, e.g. using the geomodeller_xml_obj module - but this would
# require an additional module being loaded, so avoid here!
filename_ctypes = ctypes.c_char_p(xml_filename)
# get model boundaries
#Detection of operative system:
if platform.system() == "Linux":
lib = ctypes.CDLL('./libgeomod.so') #linux
elif platform.system() == "Windows":
lib = ctypes.windll.LoadLibrary(os.path.dirname(os.path.abspath(__file__)) + os.path.sep +"libgeomodwin_leak6.dll") #windows
else:
print("Your operative system is not supported")
lib.get_model_bounds.restype = ndpointer(dtype=ctypes.c_int, shape=(6,))
boundaries = lib.get_model_bounds(filename_ctypes)
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = boundaries
self.extent_x = self.xmax - self.xmin
self.extent_y = self.ymax - self.ymin
self.extent_z = self.zmax - self.zmin
# f
def update_from_geomodeller_project(self, xml_filename):
"""Update grid properties directly from Geomodeller project
**Arguments**:
- *xml_filename* = string: filename of Geomodeller XML file
"""
filename_ctypes = ctypes.c_char_p(xml_filename)
#print filename_ctypes
# create cell position list with [x0, y0, z0, ... xn, yn, zn]
cell_position = []
ids = []
# check if cell centers are defined - if not, do so!
if not hasattr(self, 'cell_centers_x'):
if cell_position == []:
self.determine_cell_centers()
for k in range(self.nz):
for j in range(self.ny):
for i in range(self.nx):
cell_position.append(self.cell_centers_x[i])
cell_position.append(self.cell_centers_y[j])
cell_position.append(self.cell_centers_z[k])
ids.append((i,j,k))
# prepare variables for cpp function
#print cell_position
coord_ctypes = (ctypes.c_double * len(cell_position))(*cell_position)
coord_len = len(cell_position)
# call cpp function
#Detection of operative system:
if platform.system() == "Linux":
lib = ctypes.CDLL('./libgeomod.so') #linux
elif platform.system() == "Windows":
lib = ctypes.windll.LoadLibrary(os.path.dirname(os.path.abspath(__file__)) + os.path.sep +"libgeomodwin_leak6.dll") #windows
else:
print("Your operative system is not supported")
#print coord_len
        lib.compute_irregular_grid.restype = ndpointer(dtype=ctypes.c_int, shape=(coord_len//3,))
        # This is the function which needs the GPU!!!
formations_raw = lib.compute_irregular_grid(filename_ctypes, coord_ctypes, coord_len)
# re-sort formations into array
# print formations_raw
for i in range(len(formations_raw)):
self.grid[ids[i][0],ids[i][1],ids[i][2]] = formations_raw[i]
def set_densities(self, densities):
"""Set layer densities
**Arguments**:
- *densities* = dictionary of floats: densities for geology ids
"""
self.densities = densities
def set_sus(self, sus):
"""Set layer susceptibilities
**Arguments**:
            - *sus* = dictionary of floats: magnetic susceptibilities for geology ids
"""
self.sus = sus
def write_noddy_files(self, **kwds):
"""Create Noddy block model files (for grav/mag calculation)
**Optional keywords**:
- *gps_range* = float : set GPS range (default: 1200.)
Method generates the files required to run the forward gravity/ magnetics response
from the block model:
- model.g00 = file with basic model information
- model.g12 = discretised geological (block) model
- base.his = Noddy history file with some basic settings
"""
self.gps_range = kwds.get("gps_range", 1200.)
if not hasattr(self, 'basename'):
self.basename = "geogrid"
f_g12 = open(self.basename + ".g12", 'w')
f_g01 = open(self.basename + ".g00", 'w')
# method = 'numpy' # using numpy should be faster - but it messes up the order... possible to fix?
# if method == 'standard':
# i = 0
# j = 0
# k = 0
# self.block = np.ndarray((self.nx,self.ny,self.nz))
# for line in f.readlines():
# if line == '\n':
# # next z-slice
# k += 1
# # reset x counter
# i = 0
# continue
# l = [int(l1) for l1 in line.strip().split("\t")]
# self.block[i,:,self.nz-k-1] = np.array(l)[::-1]
# i += 1
if not hasattr(self, "unit_ids"):
self.determine_geology_ids()
#=======================================================================
# # create file with base settings (.g00)
#=======================================================================
f_g01.write("VERSION = 7.11\n")
f_g01.write("FILE PREFIX = " + self.basename + "\r\n")
import time
t = time.localtime() # get current time
f_g01.write("DATE = %d/%d/%d\n" % (t.tm_mday, t.tm_mon, t.tm_year))
f_g01.write("TIME = %d:%d:%d\n" % (t.tm_hour, t.tm_min, t.tm_sec))
f_g01.write("UPPER SW CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmin - self.gps_range,
self.ymin - self.gps_range,
self.zmax))
f_g01.write("LOWER NE CORNER (X Y Z) = %.1f %.1f %.1f\n" % (self.xmax + self.gps_range,
self.ymax + self.gps_range,
self.zmin))
f_g01.write("NUMBER OF LAYERS = %d\n" % self.nz)
for k in range(self.nz):
f_g01.write("\tLAYER %d DIMENSIONS (X Y) = %d %d\n" % (k,
self.nx + 2 * (self.gps_range / self.delx[0]),
self.ny + 2 * (self.gps_range / self.dely[0])))
f_g01.write("NUMBER OF CUBE SIZES = %d\n" % self.nz)
for k in range(self.nz):
f_g01.write("\tCUBE SIZE FOR LAYER %d = %d\n" % (k, self.delx[0]))
f_g01.write("CALCULATION RANGE = %d\n" % (self.gps_range / self.delx[0]))
f_g01.write("""INCLINATION OF EARTH MAG FIELD = -67.00
INTENSITY OF EARTH MAG FIELD = 63000.00
DECLINATION OF VOL. WRT. MAG NORTH = 0.00
DENSITY CALCULATED = Yes
SUSCEPTIBILITY CALCULATED = Yes
REMANENCE CALCULATED = No
ANISOTROPY CALCULATED = No
INDEXED DATA FORMAT = Yes
""")
f_g01.write("NUM ROCK TYPES = %d\n" % len(self.unit_ids))
for i in self.unit_ids:
f_g01.write("ROCK DEFINITION Layer %d = %d\n" % (i, i))
f_g01.write("\tDensity = %f\n" % self.densities[int(i)])
f_g01.write("\tSus = %f\n" % self.sus[int(i)])
#=======================================================================
# Create g12 file
#=======================================================================
# write geology blocks to file
for k in range(self.nz):
# this worked for geophysics, but not for re-import with pynoddy:
# for val in self.grid[:,:,k].ravel(order = 'A'):
# f_g12.write("%d\t" % val)
for i in range(self.nx):
for val in self.grid[i,:,k]:
f_g12.write("%d\t" % val)
f_g12.write("\r\n")
# f_g12.write(['%d\t' % i for i in self.grid[:,:,k].ravel()])
f_g12.write("\r\n")
f_g12.close()
f_g01.close()
#=======================================================================
# # create noddy history file for base settings
#=======================================================================
import pynoddy.history
history = self.basename + "_base.his"
nm = pynoddy.history.NoddyHistory('simple_two_faults.his')
#print nm
# add stratigraphy
# create dummy names and layers for base stratigraphy
layer_names = []
layer_thicknesses = []
for i in self.unit_ids:
layer_names.append('Layer %d' % i)
layer_thicknesses.append(500)
strati_options = {'num_layers' : len(self.unit_ids),
'layer_names' : layer_names,
'layer_thickness' : layer_thicknesses}
nm.add_event('stratigraphy', strati_options, )
# set grid origin and extent:
nm.set_origin(self.xmin, self.ymin, self.zmin)
nm.set_extent(self.extent_x, self.extent_y, self.extent_z)
nm.write_history(history)
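# --- Added usage sketch (not part of the original source) ---------------------
# The call order this class appears to assume before running geophysics is:
# assign one density / susceptibility per geology id, then write the Noddy files.
# The instance name `grid` and the property values below are illustrative only.
#
#   grid.set_densities({1: 2610., 2: 2920., 3: 3100.})
#   grid.set_sus({1: 0.001, 2: 0.001, 3: 0.1})
#   grid.write_noddy_files(gps_range=0.)  # writes <basename>.g00, .g12 and <basename>_base.his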
def analyse_geophysics(self, densities, **kwds):
"""Simulate potential-fields and use for model analysis
It is possible to directly define filters for the processing of gravity.
**Arguments**:
- *densities* = dictionary of floats : densities for geology ids
**Optional keywords**:
- *grav_min* = float : reject models with a grav value lower than this
- *grav_max* = float : reject models with a grav value larger than this
"""
#os.chdir(model_dir)
all_gravs = []
all_gravs_filtered = []
all_mags = []
all_mags_filtered = []
all_probs = {}
all_probs_filtered = {}
i_all = 0
i_filtered = 0
used_grids = []
used_grids_filtered = []
f = self
#for f in os.listdir(model_dir):
# if os.path.splitext(f)[1] == ".pkl" and "Sandstone" in f:
#===================================================================
# Load grid
#===================================================================
# grid_file = open(os.path.join(model_dir, f), "r")
# grid_ori = pickle.load(grid_file)
# grid_file.close()
#===================================================================
# Extract subgrid
#===================================================================
# subrange = (40,200,30,250,0,80)
grid = self
# grid = grid_ori
# substitute 0 with something else in grid ids
tmp_grid = np.zeros_like(grid.grid)
tmp_grid[grid.grid == 0] += 1
#print tmp_grid.shape
# grid.set_basename(self.split(".")[0])
# print "Basename"
# print grid.basename
grid.grid += tmp_grid
# n_cells = np.prod(grid.grid.shape)
grid.determine_geology_ids()
#===================================================================
# # set densities and magnetic susceptibilities
#===================================================================
#densities = dens
#densities = {0: 0.1,
# 1: 2610,
# 2: 2920,
# 3: 3100,
# 4: 2920,
# 5: 2610}
sus = {0: 0.001,
1: 0.001,
2: 0.001,
3: 0.1,
4: 0.001,
5: 0.001}
grid.set_densities(densities)
grid.set_sus(sus)
# print grid
grid.write_noddy_files(gps_range = 0.0)
# print grid.unit_ids
sim_type = "ANOM_FROM_BLOCK"
history = grid.basename + "_base.his"
output_name = grid.basename
#import pdb
#pdb.set_trace()
# save grid as vtk for testing:
# grid_ori.export_to_vtk(vtk_filename = grid.basename)
#===================================================================
# Run gravity forward modeling
#===================================================================
out = subprocess.Popen(['noddy.exe', history, output_name, sim_type],
shell=True, stderr=subprocess.PIPE,
stdout=subprocess.PIPE).stdout.read()
#===================================================================
# Initiate probability grids
#===================================================================
if i_all == 0:
vals = grid.unit_ids
for val in vals:
if val not in all_probs:
all_probs[val] = np.zeros_like(grid.grid, dtype = "float")
all_probs_filtered[val] = np.zeros_like(grid.grid, dtype = "float")
#===================================================================
# Create plot and store data
#===================================================================
self.geophys = pynoddy.output.NoddyGeophysics(grid.basename)
#return geophys
"""
#=================================================
# Check gravity constraints
#=================================================
filter_val = True
if kwds.has_key("grav_max"):
if np.max(geophys.grv_data) > kwds['grav_max']:
filter_val = False
if kwds.has_key("grav_min"):
if np.min(geophys.grv_data) < kwds['grav_min']:
filter_val = False
if filter_val:
all_gravs_filtered.append(geophys.grv_data)
all_mags_filtered.append(geophys.mag_data)
used_grids_filtered.append("%s/%s" % (model_dir, grid.basename))
# test_grid = np.zeros_like(grid.grid)
for val in vals:
all_probs_filtered[val] += (grid.grid == val)
# test_grid += grid.grid == val
# check probability
# assert(np.sum(test_grid) == n_cells)
i_filtered += 1
all_gravs.append(geophys.grv_data)
all_mags.append(geophys.mag_data)
used_grids.append("%s/%s" % (model_dir, grid.basename))
# test_grid = np.zeros_like(grid.grid)
for val in vals:
all_probs[val] += (grid.grid == val)
# test_grid += grid.grid == val
# assert(np.sum(test_grid) == n_cells)
i_all += 1
#===================================================================
# Export to vtk for test
#===================================================================
# grid_out = pynoddy.output.NoddyOutput(grid.basename)
# grid_out.export_to_vtk(vtk_filename = grid.basename)
#=======================================================================
# Analyse statistics for all simulated grids
#=======================================================================
# all_gravs = np.array(all_gravs)
return all_gravs, all_mags, used_grids, all_probs, i_all,\
all_gravs_filtered, all_mags_filtered, used_grids_filtered, all_probs_filtered, i_filtered
# f_all = open("all_gravs.pkl", 'w')
# pickle.dump(all_gravs, f_all)
# f_all.close()
# return all_gravs
"""
def set_dimensions(self, **kwds):
"""Set model dimensions, if no argument provided: xmin = 0, max = sum(delx) and accordingly for y,z
**Optional keywords**:
- *dim* = (xmin, xmax, ymin, ymax, zmin, zmax) : set dimensions explicitly
"""
if kwds.has_key("dim"):
(self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax) = kwds['dim']
else:
self.xmin, self.ymin, self.zmin = (0., 0., 0.)
self.xmax, self.ymax, self.zmax = (sum(self.delx), sum(self.dely), sum(self.delz))
def determine_cell_centers(self):
"""Determine cell centers for all coordinate directions in "real-world" coordinates"""
if not hasattr(self, 'xmin'):
raise AttributeError("Please define grid dimensions first")
sum_delx = np.cumsum(self.delx)
sum_dely = np.cumsum(self.dely)
sum_delz = np.cumsum(self.delz)
self.cell_centers_x = np.array([sum_delx[i] - self.delx[i] / 2. for i in range(self.nx)]) + self.xmin
self.cell_centers_y = np.array([sum_dely[i] - self.dely[i] / 2. for i in range(self.ny)]) + self.ymin
self.cell_centers_z = np.array([sum_delz[i] - self.delz[i] / 2. for i in range(self.nz)]) + self.zmin
def determine_cell_boundaries(self):
"""Determine cell boundaries for all coordinates in "real-world" coordinates"""
if not hasattr(self, 'xmin'):
raise AttributeError("Please define grid dimensions first")
sum_delx = np.cumsum(self.delx)
sum_dely = np.cumsum(self.dely)
sum_delz = np.cumsum(self.delz)
self.boundaries_x = np.ndarray((self.nx+1))
self.boundaries_x[0] = 0
self.boundaries_x[1:] = sum_delx
self.boundaries_y = np.ndarray((self.ny+1))
self.boundaries_y[0] = 0
self.boundaries_y[1:] = sum_dely
self.boundaries_z = np.ndarray((self.nz+1))
self.boundaries_z[0] = 0
self.boundaries_z[1:] = sum_delz
# create a list with all bounds
self.bounds = [self.boundaries_y[0], self.boundaries_y[-1],
self.boundaries_x[0], self.boundaries_x[-1],
self.boundaries_z[0], self.boundaries_z[-1]]
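# Added worked example (values invented, assuming xmin = 0.): for delx = [10., 10., 10.]
# the cumulative sums give boundaries_x = [0., 10., 20., 30.] and
# cell_centers_x = [5., 15., 25.], i.e. each centre sits half a cell behind the
# corresponding cumulative sum, shifted by the grid origin.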
def adjust_gridshape(self):
"""Reshape numpy array to reflect model dimensions"""
self.grid = np.reshape(self.grid, (self.nz, self.ny, self.nx))
self.grid = np.swapaxes(self.grid, 0, 2)
# self.grid = np.swapaxes(self.grid, 0, 1)
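# Added note on the reshape above (a reading of the code, not from the original docs):
# the flat import order is interpreted as (nz, ny, nx) and swapaxes(0, 2) turns it
# into an (nx, ny, nz) array, so a block with nx=50, ny=40, nz=30 (60000 values)
# ends up with grid.shape == (50, 40, 30).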
def plot_section(self, direction, cell_pos='center', **kwds):
"""Plot a section through the model in a given coordinate direction
**Arguments**:
- *direction* = 'x', 'y', 'z' : coordinate direction for section position
- *cell_pos* = int/'center','min','max' : cell position, can be given as
value of cell id, or as 'center' (default), 'min', 'max' for simplicity
**Optional Keywords**:
- *cmap* = mpl.colormap : define colormap for plot (default: jet)
- *alpha* = float : transparency of the plot (default: 1)
- *colorbar* = bool: attach colorbar (default: True)
- *geomod_coord* = bool: plot geomodeller (real-world) coordinates instead of voxel indices (default: False)
- *contour* = bool : plot contours of the layer contacts
- *plot_layer* = array: layer numbers to include in the contour plot
- *rescale* = bool: rescale color bar to range of visible slice (default: False)
- *ve* = float : vertical exaggeration (for plots in x,y-direction)
- *figsize* = (x,y) : figsize settings for plot
- *ax* = matplotlib.axis : add plot to this axis (default: new axis)
if axis is defined, the axis is returned and the plot not shown
Note: if ax is passed, colorbar is False per default!
- *savefig* = bool : save figure to file (default: show)
- *fig_filename* = string : filename to save figure
"""
#TO DO:
# Fix colorbar max and min
# - Colorbar in contourplots
colorbar = kwds.get('colorbar', True)
cmap = kwds.get('cmap', 'jet')
alpha = kwds.get('alpha', 1)
rescale = kwds.get('rescale', False)
ve = kwds.get('ve', 1.)
figsize = kwds.get('figsize', (8,4))
geomod_coord = kwds.get('geomod_coord', False)
contour = kwds.get('contour', False)
linewidth = kwds.get("linewidth", 1)
levels = kwds.get("plot_layer", None)
if 'ax' not in kwds:
colorbar = kwds.get('colorbar', True)
# create new axis for plot
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
colorbar = False
ax = kwds['ax']
if direction == 'x':
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.nx // 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
pos = self.nx - 1
else:
pos = cell_pos
grid_slice = self.grid[pos,:,:]
grid_slice = grid_slice.transpose()
#print grid_slice
aspect = self.extent_z/self.extent_x * ve
if geomod_coord:
ax.set_xticks(np.linspace(0,self.ny-1,6, endpoint = True, dtype = int))
ax.set_yticks(np.linspace(0,self.nz-1,6, endpoint = True, dtype = int))
ax.set_xticklabels(np.linspace(self.ymin,self.ymax,6,dtype = int, endpoint = True ))
ax.set_yticklabels(np.linspace(self.zmin,self.zmax,6,dtype = int, endpoint = True ))
ax.set_ylabel("z[m]")
ax.set_xlabel("y[m]")
else:
ax.set_ylabel("z[voxels]")
ax.set_xlabel("y[voxels]")
if contour:
ry = np.arange(self.nz)
rx = np.arange(self.ny)
elif direction == 'y':
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.ny // 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
pos = self.ny - 1
else:
pos = cell_pos
grid_slice = self.grid[:,pos,:]
grid_slice = grid_slice.transpose()
aspect = self.extent_z/self.extent_y * ve
if geomod_coord:
#print np.linspace(0,self.extent_x,11), np.linspace(0,self.extent_x,11, endpoint = True)
ax.set_xticks(np.linspace(0,self.nx-1,6, endpoint = True, dtype = int))
ax.set_yticks(np.linspace(0,self.nz-1,6, endpoint = True, dtype = int))
ax.set_xticklabels(np.linspace(self.xmin,self.xmax,6, endpoint = True, dtype = int))
ax.set_yticklabels(np.linspace(self.zmin,self.zmax,6, dtype = int,endpoint = True ))
#ax.invert_yaxis
ax.set_ylabel("z[m]")
ax.set_xlabel("x[m]")
else:
ax.set_ylabel("z[voxels]")
ax.set_xlabel("x[voxels]")
if contour:
ry = np.arange(self.nz)
rx = np.arange(self.nx)
elif direction == 'z' :
if type(cell_pos) == str:
# decipher cell position
if cell_pos == 'center' or cell_pos == 'centre':
pos = self.nz // 2
elif cell_pos == 'min':
pos = 0
elif cell_pos == 'max':
pos = self.nz - 1
else:
pos = cell_pos
grid_slice = self.grid[:,:,pos].transpose()
aspect = 1.
# setting labels
if geomod_coord:
# print self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax
# print np.linspace(self.xmin,self.xmax,6, endpoint = False, dtype = int)
# print np.linspace(0,self.extent_y,6, endpoint = False, dtype = int)
ax.set_xticks(np.linspace(0,self.nx-1,6,dtype = int, endpoint = True ))
ax.set_yticks(np.linspace(0,self.ny-1,6, endpoint = True, dtype = int))
ax.set_xticklabels(np.linspace(self.xmin,self.xmax,6,dtype = int, endpoint = True ))
ax.set_yticklabels(np.linspace(self.ymin,self.ymax,6,dtype = int, endpoint = True ))
ax.set_ylabel("y[m]")
ax.set_xlabel("x[m]")
else:
ax.set_ylabel("y[voxels]")
ax.set_xlabel("x[voxels]")
if contour:
ry = np.arange(self.ny)
rx = np.arange(self.nx)
if not hasattr(self, 'unit_ids'):
self.determine_geology_ids()
if rescale:
vmin = np.min(grid_slice)
vmax = np.max(grid_slice)
else: # use global range for better comparison
vmin = min(self.unit_ids)
vmax = max(self.unit_ids)
if contour:
Rx, Ry = np.meshgrid(rx, ry)
#print np.amax(grid_slice)
im = ax.contour(Rx, Ry, grid_slice, int( | np.amax(grid_slice) | numpy.amax |
import pickle
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(suppress=True)
"""
Load recordings
"""
indxs = {}
logged_state_names = [
'time_step', 'episode_id', 'veh_id', 'trace', 'glob_x',
'speed', 'act_long', 'min_delta_x', 'att_real']
index = 0
for item_name in logged_state_names:
indxs[item_name] = index
index += 1
real_collections = {}
ima_collections = {}
collision_logs = {}
runtimes = {}
model_names = ['mlp_05', 'lstm_05', 'latent_mlp_22', 'neural_045', 'neural_idm_367']
mc_run_name = 'rwse'
for model_name in model_names:
exp_dir = './src/evaluation/mc_collections/'+ mc_run_name + '/' + model_name
with open(exp_dir+'/real_collection.pickle', 'rb') as handle:
real_collections[model_name] = pickle.load(handle)
with open(exp_dir+'/ima_collection.pickle', 'rb') as handle:
ima_collections[model_name] = pickle.load(handle)
with open(exp_dir+'/runtime.pickle', 'rb') as handle:
runtimes[model_name] = pickle.load(handle)
try:
with open(exp_dir+'/collision_log.pickle', 'rb') as handle:
collision_logs[model_name] = pickle.load(handle)
except (IOError, OSError):  # no collision log recorded for this model
collision_logs[model_name] = []
model_paper_name = {} # model names used for paper
klplot_legend_names = {}
names = ['MLP', 'LSTM', 'Latent-MLP', 'CVAE', 'NIDM']
for i, model_name in enumerate(model_names):
model_paper_name[model_name] = names[i]
names = ['Headway', 'Speed', 'Long. Acceleration']
for i, _name in enumerate(['min_delta_x', 'speed', 'act_long']):
klplot_legend_names[_name] = names[i]
def kl(p, q):
p = np.asarray(p, dtype=float)
q = np.asarray(q, dtype=float)
return np.sum(p*np.log(p/q))
def get_state_kl(trajs, bins):
"""
Returns the KL divergence between two histograms formed of
piecewise-uniform rectangles with n bins of equal width.
"""
EPSILON = 1e-7  # to avoid zeros
true_traj, pred_traj = trajs
pred_hist, bin_edges = np.histogram(pred_traj, bins)
true_hist, _ = np.histogram(true_traj, bin_edges)
pred_hist = pred_hist + EPSILON
true_hist = true_hist + EPSILON
# normalise counts to probability mass functions
pred_prob = pred_hist / pred_hist.sum()
true_prob = true_hist / true_hist.sum()
kl_val = kl(true_prob, pred_prob)
return kl_val
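# Added minimal sketch of how kl()/get_state_kl() are meant to be combined; the
# arrays are invented for illustration only:
#
#   true_traj = np.random.normal(20., 2., 5000)   # e.g. logged headways
#   pred_traj = np.random.normal(21., 2., 5000)   # e.g. rolled-out headways
#   kl_val = get_state_kl([true_traj, pred_traj], bins=50)
#
# Values near 0 mean the predicted state distribution closely matches the logged
# one; larger values mean the two histograms diverge.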
"""
Each trajectory snippet is rollout_len time steps long
"""
rollout_len = 100
snips_true = {}
snips_pred = {}
for model_name in model_names:
snips_true[model_name] = [] # shape: (car_count, traces_n, rollout_len, 8)
snips_pred[model_name] = [] # shape: (car_count, 1, rollout_len, 9)
for model_name in model_names:
for epis_id, epis_dic in real_collections[model_name].items():
for veh_id, veh_dic in real_collections[model_name][epis_id].items():
_true = | np.array(real_collections[model_name][epis_id][veh_id]) | numpy.array |
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
def test__genotypes_to_X(test_data):
# Make sure function catches bad genotype passes
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# Duplicated
g = list(gpm.genotype)
g.extend(g)
# not in gpmap
b = list(gpm.genotype)
b.append("stupid")
bad_genotypes = [g,b]
for bad in bad_genotypes:
with pytest.raises(ValueError):
models.base._genotypes_to_X(bad,gpm,order=1,model_type="local")
# Sample through various model combos
allowed = {"local":set([0,1]),
"global":set([-1,1])}
for d in test_data:
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
for i in range(1,gpm.length+1,1):
for model_type in ["local","global"]:
X = models.base._genotypes_to_X(gpm.genotype,
gpm,
order=i,
model_type=model_type)
assert X.shape[0] == len(gpm.genotype)
assert set(np.unique(X)).issubset(allowed[model_type])
def test_arghandler_decorator():
class Yo:
def _a(self,data=5,method=None):
return data
def _b(self,data=None,method=None):
return 6
@models.base.arghandler
def test_method(self,a=None,b=None,**kwargs):
return a, b
@models.base.arghandler
def bad_method(self,c=None,d=None,**kwargs):
return c, d
yo = Yo()
assert yo.test_method() == (None,6)
assert yo.test_method(a=5) == (5,6)
assert yo.test_method(a=10) == (10,6)
assert yo.test_method(b=10) == (None,6)
with pytest.raises(AttributeError):
yo.bad_method()
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
"""
Test basic functionality. Real test of values will be done on .predict
for subclasses.
"""
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
# This should fail -- no fit run
with pytest.raises(Exception):
df = m.predict_to_df()
m.fit()
# This should work
df = m.predict_to_df()
assert type(df) is type(pd.DataFrame())
assert len(df) == len(d["genotype"])
# Create and fit a new model.
m = models.linear.EpistasisLinearRegression()
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# No gpm added -- should fail
with pytest.raises(RuntimeError):
m.predict_to_df()
m.add_gpm(gpm)
m.fit()
df = m.predict_to_df(genotypes=d["genotype"][0])
assert len(df) == 1
bad_stuff = [1,{},[1,2],"STUPID",["STUPID","IS","REAL"]]
for b in bad_stuff:
with pytest.raises(ValueError):
print(f"Trying bad genotypes {b}")
m.predict_to_df(genotypes=b)
df = m.predict_to_df(genotypes=d["genotype"][:3])
assert len(df) == 3
def test_abstractmodel_predict_to_csv(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
csv_file = os.path.join(tmp_path,"tmp.csv")
m.predict_to_csv(filename=csv_file)
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_csv(filename=csv_file,genotypes=d["genotype"][0])
assert os.path.exists(csv_file)
df = pd.read_csv(csv_file)
assert len(df) == 1
def test_abstractmodel_predict_to_excel(test_data,tmp_path):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
m.fit()
excel_file = os.path.join(tmp_path,"tmp.xlsx")
m.predict_to_excel(filename=excel_file)
assert os.path.exists(excel_file)
df = pd.read_excel(excel_file)
assert len(df) == len(d["genotype"])
# Make sure genotypes pass works
m.predict_to_excel(filename=excel_file,genotypes=d["genotype"][0])
assert os.path.exists(excel_file)
df = pd.read_excel(excel_file)
assert len(df) == 1
def test_abstractmodel_add_gpm(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
bad_gpm = [1,None,"test",[],{}]
for b in bad_gpm:
with pytest.raises(TypeError):
m.add_gpm(b)
m.add_gpm(gpm)
# Test genotype_column arg
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
bad_genotype_column = [1,None,[],{},(1,)]
for b in bad_genotype_column:
with pytest.raises(TypeError):
print(f"trying {b}")
m.add_gpm(gpm,genotype_column=b)
with pytest.raises(KeyError):
m.add_gpm(gpm,genotype_column="not_a_column")
m.add_gpm(gpm,genotype_column="genotype")
assert m.genotype_column == "genotype"
# Test phenotype_column arg
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"])
m = models.linear.EpistasisLinearRegression()
# Shouldn't work b/c no float column
with pytest.raises(ValueError):
m.add_gpm(gpm)
# Shouldn't work because there is no column with that name
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
with pytest.raises(KeyError):
m.add_gpm(gpm,phenotype_column="not_real")
# Shouldn't work because column is not numeric
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["genotype"])
with pytest.raises(ValueError):
m.add_gpm(gpm,phenotype_column="phenotype")
# Make sure it gets right column (first float that is not reserved)
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
something_else=d["phenotype"])
m.add_gpm(gpm)
assert m.phenotype_column == "coolness"
# Test uncertainty_column arg.
# Do default = None
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
assert m.uncertainty_column == "epi_zero_uncertainty"
unc = np.array(m.gpm.data.loc[:,"epi_zero_uncertainty"])
assert len(np.unique(unc)) == 1
assert np.isclose(unc[0],np.min(gpm.data.loc[:,m.phenotype_column])*1e-6)
# pass missing column
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
coolness=d["phenotype"],
not_float=d["genotype"])
# Send in same as phenotype
with pytest.raises(ValueError):
m.add_gpm(gpm,uncertainty_column="phenotype")
# send in not there
with pytest.raises(KeyError):
m.add_gpm(gpm,uncertainty_column="not_there")
# send in not float
with pytest.raises(ValueError):
m.add_gpm(gpm,uncertainty_column="not_float")
# Shoud work
m.add_gpm(gpm,uncertainty_column="coolness")
assert m.uncertainty_column == "coolness"
# Check final output
assert m.gpm is gpm
assert m.Xcolumns is not None
assert m.epistasis is not None
assert m._previous_X is None
def test_gpm_getter(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
assert m.gpm is None
m.add_gpm(gpm)
assert m.gpm is gpm
def test_results_getter(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
m.add_gpm(gpm)
assert m.results is None
m.fit()
assert isinstance(m.results,pd.DataFrame)
def test_column_getters(test_data):
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
uncertainty=d["phenotype"])
m = models.linear.EpistasisLinearRegression()
assert m.genotype_column is None
assert m.phenotype_column is None
assert m.uncertainty_column is None
m.add_gpm(gpm,uncertainty_column="uncertainty")
assert m.genotype_column == "genotype"
assert m.phenotype_column == "phenotype"
assert m.uncertainty_column == "uncertainty"
def test__X_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._X()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm)
# Make sure calling _X() naked-ly populates previous_X
assert m._previous_X is None
X = m._X()
assert m._previous_X is X
# If we access after having run, make sure X is the same object
assert X is m._X()
# Should wipe out previous_X and force recalculation.
m.add_gpm(gpm)
assert X is not m._X()
# Get x for single genotype. should work. should not update _previous_X
X = m._X(d["genotype"][0])
assert len(X) == 1
assert X is not m._previous_X
# Get x for two genotypes. should work and not update _previous_X
X = m._X(d["genotype"][0:2])
assert len(X) == 2
assert X is not m._previous_X
# Get x for two genotypes. should work and not update _previous_X
X = m._X(np.array(d["genotype"][0:2]))
assert len(X) == 2
assert X is not m._previous_X
# Just keep the array, do not update previous_X
hack = np.ones((1,1))
X = m._X(data=hack)
assert X is hack
assert X is not m._previous_X
# pass in bad genotypes
with pytest.raises(ValueError):
X = m._X("NOT_A_GENOTYPE")
with pytest.raises(ValueError):
X = m._X([d["genotype"][0],"NOT_A_GENOTYPE"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(ValueError):
m._X(b)
def test__y_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._y()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness")
assert np.array_equal(m._y(),d["phenotype"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._y(b)
y = m._y([1.0])
assert np.array_equal(y,[1.0])
def test__yerr_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
with pytest.raises(ValueError):
m._yerr()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness",uncertainty_column="uncertainty")
assert np.array_equal(m._yerr(),d["phenotype"])
# pass in general badness
bad_passes = [np.ones((1,1,1)),[],"stupid",1,1.1,()]
for b in bad_passes:
with pytest.raises(TypeError):
print(f"trying {b}")
m._yerr(b)
y = m._yerr([1.0])
assert np.array_equal(y,[1.0])
def test__thetas_arghandler(test_data):
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
coolness=d["phenotype"],
uncertainty=d["phenotype"])
m.add_gpm(gpm,phenotype_column="coolness",uncertainty_column="uncertainty")
# No thetas calculated yet
with pytest.raises(RuntimeError):
m._thetas()
m.fit()
# Get thetas, now calculated
t = m._thetas()
assert len(t) == 4
# pass in general badness
bad_passes = [ | np.ones((1,1,1)) | numpy.ones |
#!/usr/bin/python3
from __future__ import print_function
import sys
import numpy as np
import banalg
import matplotlib.pyplot as plt
from mnist import MNIST
from sklearn.decomposition import PCA
from os import path
#=================
# Plot Cumulative Regret
#=================
def plot(title, regret, labels):
"""
@param title: graph title
@param regret: T+1 x len(bandits) cumulative regret
@param labels: label[i] for bandits[i]
Plots regret curve.
"""
plt.title(title)
T = regret.shape[0]
t = np.arange(T)
for i, l in enumerate(labels):
plt.plot(t, regret[:, i], label=l)
plt.xlim(0, T)
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.show()
#=================
# General Bandit Alg
#=================
def run(T, bandits, contexts, rewards, contexts_test, rewards_test, posthocs, postMat):
"""
@param bandits: list of initialized bandit algorithms
@param contexts: TxdF of PCA'd MNIST images
@param rewards: Txn 1-hot labels
@return (T+1) x len(bandits) array of cumulative regret
"""
# Define constants
cum_regret = np.zeros((T+1, len(bandits)))
indices = np.arange(contexts.shape[0])
np.random.shuffle(indices)
for t in range(T):
ind = indices[t]
if t % 10 == 0:
print("Round: %d" % t)
for i in range(len(bandits)):
print("Bandit %d Regret %f" % (i, cum_regret[t, i]))
# Choose arm for each bandit
I_t = []
for bandit in bandits:
I_t.append(bandit.choice(t, contexts[ind, :]))
# Update bandits
for i, bandit in enumerate(bandits):
# Assume pi_star is perfect, reward=1
regret = 1 - rewards[ind, I_t[i]]
cum_regret[t+1, i] = cum_regret[t, i] + regret
bandit.update(contexts[ind, :], I_t[i], rewards[ind, I_t[i]], posthocs[ind, :])
# Finished, return error array
print("Finished T=%d rounds!" % T)
return cum_regret
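# Added usage sketch (the bandit class names are placeholders, not from this file):
#
#   bandits = [banalg.SomeBandit(...), banalg.AnotherBandit(...)]
#   regret = run(T=5000, bandits=bandits, contexts=ctx, rewards=rew,
#                contexts_test=ctx_te, rewards_test=rew_te,
#                posthocs=ph, postMat=pm)
#   plot("MNIST bandit", regret, labels=["bandit A", "bandit B"])
#
# run() returns a (T+1) x len(bandits) array of cumulative regret; plot() then
# draws one curve per bandit.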
VAL_SIZE = 10000
def gen_context(mndata, dF, trueTest=True):
print("Loading Training Set...")
images, labels = mndata.load_training()
if trueTest:
print("Loading Test Set...")
images_test, labels_test = mndata.load_testing()
else:
print("Loading Validation Set...")
images_test = images[len(images) - VAL_SIZE:len(images)]
images = images[0:len(images) - VAL_SIZE]
labels_test = labels[len(labels) - VAL_SIZE:len(labels)]
labels = labels[0:len(labels) - VAL_SIZE]
# Format labels
labels = np.array(labels)
labels_test = np.array(labels_test)
Ttrain = len(labels)
Ttest = len(labels_test)
print("T_train=%d" % Ttrain)
print("T_val=%d" % Ttest)
n = labels.max() + 1
# Create 1-hot rewards
rewards = np.zeros((Ttrain, n))
rewards[np.arange(labels.size),labels] = 1
rewards_test = np.zeros((Ttest, n))
rewards_test[np.arange(labels_test.size),labels_test] = 1
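# (Added aside: the fancy-indexing trick above builds one-hot rows, e.g.
#  labels = [2, 0] with n = 3 gives rewards = [[0, 0, 1], [1, 0, 0]].)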
# PCA Contexts
images = np.array(images)
images_test = | np.array(images_test) | numpy.array |
"""
Provides the class `LdaCgsViewer`.
"""
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
import numpy as np
from vsm.spatial import H, JS_dist, KL_div
from vsm.structarr import *
from vsm.split import split_corpus
from vsm.exceptions import *
from .types import *
from .labeleddata import *
from .wrappers import *
__all__ = [ 'LdaCgsViewer' ]
class LdaCgsViewer(object):
"""A class for viewing a topic model estimated by one of vsm's LDA
classes using CGS.
"""
def __init__(self, corpus, model):
"""
Initialize LdaCgsViewer.
:param corpus: Source of observed data.
:type corpus: :class:`Corpus`
:param model: An LDA model estimated by a CGS.
:type model: LdaCgsSeq
"""
self.corpus = corpus
self.model = model
self._phi = None
self._theta = None
self._H_phi = None
self._H_theta = None
self._labels = None
@property
def _doc_label_name(self):
return doc_label_name(self.model.context_type)
def _res_doc_type(self, doc):
return res_doc_type(self.corpus, self.model.context_type,
self._doc_label_name, doc)
def _res_word_type(self, word):
return res_word_type(self.corpus, word)
@property
def labels(self):
"""Returns the list of document labels."""
if self._labels is None:
self._labels = self.corpus.view_metadata(self.model.context_type)
self._labels = self._labels[self._doc_label_name]
return self._labels
@property
def phi(self):
"""Returns the word by topic matrix from the model as a right
stochastic matrix (the columns phi_i are probability
distributions).
"""
if self._phi is None:
self._phi = self.model.word_top / self.model.word_top.sum(0)
return self._phi
@property
def theta(self):
"""Returns the topic by document matrix from the model as a right
stochastic matrix (the columns theta_i are probability
distributions.
"""
if self._theta is None:
self._theta = self.model.top_doc / self.model.top_doc.sum(0)
return self._theta
@property
def H_phi(self):
"""Returns the entropies of the columns of phi (i.e., topics)
"""
if self._H_phi is None:
self._H_phi = H(self.phi.T)
return self._H_phi
@property
def H_theta(self):
"""Returns the entropies of the columns of theta.
"""
if self._H_theta is None:
self._H_theta = H(self.theta.T)
return self._H_theta
def topic_entropies(self, print_len=10):
"""Returns the entropies of the topics of the model as an array sorted
by entropy.
"""
H_phi = self.H_phi
k_arr = enum_sort(H_phi).view(LabeledColumn)
k_arr.col_header = 'Topic Entropies'
k_arr.subcol_headers = ['Index', 'Entropy']
k_arr.col_len = print_len
return k_arr[::-1]
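# Added interpretation note: H(phi.T) is the entropy of each topic's word
# distribution -- a topic spread evenly over the vocabulary has maximal entropy,
# while one concentrated on a handful of words has entropy near 0 -- so sorting
# topics by entropy separates diffuse topics from sharply focused ones.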
def doc_entropies(self, print_len=10,
label_fn=def_label_fn, as_strings=True):
"""Returns the entropies of the distributions over topics as an
array sorted by entropy.
"""
if as_strings:
md = self.corpus.view_metadata(self.model.context_type)
docs = label_fn(md)
d_arr = enum_sort(self.H_theta, indices=docs, field_name='doc')
else:
d_arr = enum_sort(self.H_theta)
d_arr = d_arr.view(LabeledColumn)
d_arr.col_header = 'Document Entropies'
d_arr.subcol_headers = ['Document', 'Entropy']
d_arr.col_len = print_len
return d_arr[::-1]
def topic_oscillations(self, print_len=10, div_fn=KL_div):
"""Returns the oscillation in the divergences of documents
from each topic k, represented as a categorical distribution
over topics with mass concentrated at index k.
Oscillation is computed as the difference between the maximum
and the minimum of the divergences.
Returns an array sorted by descending oscillation.
"""
topic_indices = np.arange(self.model.K)
pseudo_docs = np.diag( | np.ones(self.model.K, dtype='d') | numpy.ones |
from collections import OrderedDict
from datetime import date
import numpy as np
from Constants import Constants
from DCN_Experiments import DCN_Experiments
from PS_Manager import PS_Manager
from PS_Treated_Generator import PS_Treated_Generator
from TARNet_Experiments import TARNet_Experiments
from Utils import Utils
from dataloader import DataLoader
class Experiments:
def __init__(self, running_mode):
self.dL = DataLoader()
self.running_mode = running_mode
self.np_covariates_X_train = None
self.np_covariates_X_test = None
self.np_covariates_T_train = None
self.np_covariates_T_test = None
def run_all_experiments(self, train_path, test_path, iterations, ps_model_type):
device = Utils.get_device()
print(device)
results_list = []
run_parameters = self.__get_run_parameters()
print(str(run_parameters["summary_file_name"]))
file1 = open(run_parameters["summary_file_name"], "a")
for iter_id in range(iterations):
print("--" * 20)
print("iter_id: {0}".format(iter_id))
print("Jobs - NN")
print("--" * 20)
input_nodes = run_parameters["input_nodes"]
self.np_covariates_X_train, self.np_covariates_X_test, self.np_covariates_X_val, \
self.np_covariates_T_train, \
self.np_covariates_T_test, self.np_covariates_T_val \
= self.__load_data(train_path, test_path, iter_id)
# get propensity score for classifier training and testing
ps_score_list_train, ps_score_list_val, ps_score_list_test, ps_model = \
self.__get_ps_model(ps_model_type,
iter_id,
run_parameters["input_nodes"],
device)
run_parameters["consolidated_file_path"] = self.get_consolidated_file_name(ps_model_type)
print("--->>Train size: ")
data_loader_dict_train = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_train,
self.np_covariates_T_train,
ps_score_list_train,
run_parameters["is_synthetic"])
print("--->>Validation size: ")
data_loader_dict_val = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_val,
self.np_covariates_T_val,
ps_score_list_val,
run_parameters["is_synthetic"])
print("--->>Test size: ")
data_loader_dict_test = self.dL.prepare_tensor_for_DCN(self.np_covariates_X_test,
self.np_covariates_T_test,
ps_score_list_test,
run_parameters["is_synthetic"])
n_treated_original = data_loader_dict_train["treated_data"][0].shape[0]
n_control_original = data_loader_dict_train["control_data"][0].shape[0]
# Execute PM GAN
ps_t = PS_Treated_Generator(data_loader_dict_train, data_loader_dict_val, ps_model, ps_model_type)
balanced_dataset_dict = ps_t.simulate_treated_semi_supervised(input_nodes, iter_id, device)
tensor_treated_balanced_dcn = balanced_dataset_dict["tensor_treated_balanced_dcn"]
tensor_control_balanced_dcn = balanced_dataset_dict["tensor_control_balanced_dcn"]
n_treated_balanced_dcn = balanced_dataset_dict["n_treated_balanced_dcn"]
n_control_balanced_dcn = balanced_dataset_dict["n_control_balanced_dcn"]
tensor_balanced_tarnet = balanced_dataset_dict["tensor_balanced_tarnet"]
n_total_balanced_tarnet = balanced_dataset_dict["n_total_balanced_tarnet"]
n_treated_balanced_tarnet = balanced_dataset_dict["n_treated_balanced_tarnet"]
print("---" * 20)
print("-----------> !! Supervised Training(DCN Models ) !!<-----------")
# run DCN Models
tensor_treated_train_original = \
Utils.create_tensors_from_tuple(data_loader_dict_train["treated_data"])
tensor_control_train_original = \
Utils.create_tensors_from_tuple(data_loader_dict_train["control_data"])
model_save_paths = {
"Model_DCN_PD_shared": run_parameters["Model_DCN_PD_shared"].format(iter_id),
"Model_DCN_PD_y1": run_parameters["Model_DCN_PD_y1"].format(iter_id),
"Model_DCN_PD_y0": run_parameters["Model_DCN_PD_y0"].format(iter_id),
"Model_DCN_PD_02_shared": run_parameters["Model_DCN_PD_02_shared"].format(iter_id),
"Model_DCN_PD_02_y1": run_parameters["Model_DCN_PD_02_y1"].format(iter_id),
"Model_DCN_PD_02_y0": run_parameters["Model_DCN_PD_02_y0"].format(iter_id),
"Model_DCN_PD_05_shared": run_parameters["Model_DCN_PD_05_shared"].format(iter_id),
"Model_DCN_PD_05_y1": run_parameters["Model_DCN_PD_05_y1"].format(iter_id),
"Model_DCN_PD_05_y0": run_parameters["Model_DCN_PD_05_y0"].format(iter_id),
"Model_DCN_PM_GAN_shared": run_parameters["Model_DCN_PM_GAN_shared"].format(iter_id),
"Model_DCN_PM_GAN_y1": run_parameters["Model_DCN_PM_GAN_y1"].format(iter_id),
"Model_DCN_PM_GAN_y0": run_parameters["Model_DCN_PM_GAN_y0"].format(iter_id),
"Model_DCN_PM_GAN_02_shared": run_parameters["Model_DCN_PM_GAN_02_shared"].format(iter_id),
"Model_DCN_PM_GAN_02_y1": run_parameters["Model_DCN_PM_GAN_02_y1"].format(iter_id),
"Model_DCN_PM_GAN_02_y0": run_parameters["Model_DCN_PM_GAN_02_y0"].format(iter_id),
"Model_DCN_PM_GAN_05_shared": run_parameters["Model_DCN_PM_GAN_05_shared"].format(iter_id),
"Model_DCN_PM_GAN_05_y1": run_parameters["Model_DCN_PM_GAN_05_y1"].format(iter_id),
"Model_DCN_PM_GAN_05_y0": run_parameters["Model_DCN_PM_GAN_05_y0"].format(iter_id),
"Model_DCN_PM_GAN_PD_shared": run_parameters["Model_DCN_PM_GAN_PD_shared"].format(iter_id),
"Model_DCN_PM_GAN_PD_y1": run_parameters["Model_DCN_PM_GAN_PD_y1"].format(iter_id),
"Model_DCN_PM_GAN_PD_y0": run_parameters["Model_DCN_PM_GAN_PD_y0"].format(iter_id)
}
dcn_experiments = DCN_Experiments(input_nodes, device)
dcn_pd_models_eval_dict = dcn_experiments.evaluate_DCN_Model(tensor_treated_train_original,
tensor_control_train_original,
n_treated_original,
n_control_original,
tensor_treated_balanced_dcn,
tensor_control_balanced_dcn,
n_treated_balanced_dcn,
n_control_balanced_dcn,
data_loader_dict_val,
data_loader_dict_test,
model_save_paths)
print("---" * 20)
print("-----------> !! Supervised Evaluation(DCN Models) !! <-----------")
print("---" * 20)
print("--> 1. Model 1: DCN - PD Supervised Training Evaluation: ")
dcn_pd_eval = dcn_pd_models_eval_dict["dcn_pd_eval_dict"]
dcn_pd_ate_pred, dcn_pd_att_pred, dcn_pd_bias_att, dcn_pd_atc_pred, dcn_pd_policy_value, \
dcn_pd_policy_risk, dcn_pd_err_fact = \
self.__process_evaluated_metric(
dcn_pd_eval["yf_list"],
dcn_pd_eval["e_list"],
dcn_pd_eval["T_list"],
dcn_pd_eval["y1_hat_list"],
dcn_pd_eval["y0_hat_list"],
dcn_pd_eval["ITE_dict_list"],
dcn_pd_eval["predicted_ITE"],
run_parameters["DCN_PD"],
iter_id)
print("---" * 20)
print("--> 2. Model 2: DCN - PD(Dropout 0.5) Supervised Training Evaluation: ")
dcn_pd_05_eval_dict = dcn_pd_models_eval_dict["dcn_pd_05_eval_dict"]
dcn_pd_05_ate_pred, dcn_pd_05_att_pred, dcn_pd_05_bias_att, dcn_pd_05_atc_pred, \
dcn_pd_05_policy_value, \
dcn_pd_05_policy_risk, dcn_pd_05_err_fact = \
self.__process_evaluated_metric(
dcn_pd_05_eval_dict["yf_list"],
dcn_pd_05_eval_dict["e_list"],
dcn_pd_05_eval_dict["T_list"],
dcn_pd_05_eval_dict["y1_hat_list"],
dcn_pd_05_eval_dict["y0_hat_list"],
dcn_pd_05_eval_dict["ITE_dict_list"],
dcn_pd_05_eval_dict["predicted_ITE"],
run_parameters["DCN_PD_05"],
iter_id)
print("---" * 20)
print("--> 3. Model 3: PM GAN - No dropout Supervised Training Evaluation: ")
dcn_pm_gan_eval = dcn_pd_models_eval_dict["dcn_pm_gan_eval_dict"]
dcn_pm_gan_ate_pred, dcn_pm_gan_att_pred, dcn_pm_gan_bias_att, dcn_pm_gan_atc_pred, \
dcn_pm_gan_policy_value, dcn_pm_gan_policy_risk, dcn_pm_gan_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval["yf_list"],
dcn_pm_gan_eval["e_list"],
dcn_pm_gan_eval["T_list"],
dcn_pm_gan_eval["y1_hat_list"],
dcn_pm_gan_eval["y0_hat_list"],
dcn_pm_gan_eval["ITE_dict_list"],
dcn_pm_gan_eval["predicted_ITE"],
run_parameters["DCN_PM_GAN"],
iter_id)
print("---" * 20)
print("--> 4. Model 4: PM GAN - dropout 0.5 Supervised Training Evaluation: ")
dcn_pm_gan_eval_05 = dcn_pd_models_eval_dict["dcn_pm_gan_eval_drp_05_dict"]
dcn_pm_gan_05_ate_pred, dcn_pm_gan_05_att_pred, dcn_pm_gan_05_bias_att, dcn_pm_gan_05_atc_pred, \
dcn_pm_gan_05_policy_value, dcn_pm_gan_05_policy_risk, dcn_pm_gan_05_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval_05["yf_list"],
dcn_pm_gan_eval_05["e_list"],
dcn_pm_gan_eval_05["T_list"],
dcn_pm_gan_eval_05["y1_hat_list"],
dcn_pm_gan_eval_05["y0_hat_list"],
dcn_pm_gan_eval_05["ITE_dict_list"],
dcn_pm_gan_eval_05["predicted_ITE"],
run_parameters["DCN_PM_GAN_05"],
iter_id)
print("---" * 20)
print("--> 5. Model 5: PM GAN - PD Supervised Training Evaluation: ")
dcn_pm_gan_eval_pd = dcn_pd_models_eval_dict["dcn_pm_gan_eval_pd_dict"]
dcn_pm_gan_pd_ate_pred, dcn_pm_gan_pd_att_pred, dcn_pm_gan_pd_bias_att, dcn_pm_gan_pd_atc_pred, \
dcn_pm_gan_pd_policy_value, dcn_pm_gan_pd_policy_risk, dcn_pm_gan_pd_err_fact = \
self.__process_evaluated_metric(
dcn_pm_gan_eval_pd["yf_list"],
dcn_pm_gan_eval_pd["e_list"],
dcn_pm_gan_eval_pd["T_list"],
dcn_pm_gan_eval_pd["y1_hat_list"],
dcn_pm_gan_eval_pd["y0_hat_list"],
dcn_pm_gan_eval_pd["ITE_dict_list"],
dcn_pm_gan_eval_pd["predicted_ITE"],
run_parameters["DCN_PM_GAN_PD"],
iter_id)
print("---" * 20)
print("---" * 20)
# run TARNet Models
print("-----------> !! Supervised Training(TARNet Models) !!<-----------")
tarnet_experiments = TARNet_Experiments(input_nodes, device)
tarnet_experiments_models_eval_dict = tarnet_experiments.evaluate_TARNet_Model(
data_loader_dict_train["treated_data"],
data_loader_dict_train["control_data"],
tensor_balanced_tarnet,
data_loader_dict_val,
data_loader_dict_test,
n_total_balanced_tarnet,
n_treated_balanced_tarnet)
print("---" * 20)
print("---> !! Supervised Evaluation(TARNet Models) !! <---")
print("---" * 20)
print("--> 1. Model 1: TARNet Supervised Training Evaluation: ")
tarnet_eval = tarnet_experiments_models_eval_dict["tarnet_eval_dict"]
tarnet_ate_pred, tarnet_att_pred, tarnet_bias_att, tarnet_atc_pred, \
tarnet_policy_value, tarnet_policy_risk, tarnet_err_fact = \
self.__process_evaluated_metric(
tarnet_eval["yf_list"],
tarnet_eval["e_list"],
tarnet_eval["T_list"],
tarnet_eval["y1_hat_list"],
tarnet_eval["y0_hat_list"],
tarnet_eval["ITE_dict_list"],
tarnet_eval["predicted_ITE"],
run_parameters["TARNET"],
iter_id)
print("--> 2. Model 2: TARNet PM GAN Supervised Training Evaluation: ")
tarnet_pm_gan_eval = tarnet_experiments_models_eval_dict["tarnet_pm_gan_eval_dict"]
tarnet_pm_gan_ate_pred, tarnet_pm_gan_att_pred, tarnet_pm_gan_bias_att, tarnet_pm_gan_atc_pred, \
tarnet_pm_gan_policy_value, tarnet_pm_gan_policy_risk, tarnet_pm_gan_err_fact = \
self.__process_evaluated_metric(
tarnet_pm_gan_eval["yf_list"],
tarnet_pm_gan_eval["e_list"],
tarnet_pm_gan_eval["T_list"],
tarnet_pm_gan_eval["y1_hat_list"],
tarnet_pm_gan_eval["y0_hat_list"],
tarnet_pm_gan_eval["ITE_dict_list"],
tarnet_pm_gan_eval["predicted_ITE"],
run_parameters["TARNET"],
iter_id)
print("---" * 20)
result_dict = OrderedDict()
result_dict["iter_id"] = iter_id
result_dict["dcn_pd_ate_pred"] = dcn_pd_ate_pred
result_dict["dcn_pd_att_pred"] = dcn_pd_att_pred
result_dict["dcn_pd_bias_att"] = dcn_pd_bias_att
result_dict["dcn_pd_atc_pred"] = dcn_pd_atc_pred
result_dict["dcn_pd_policy_value"] = dcn_pd_policy_value
result_dict["dcn_pd_policy_risk"] = dcn_pd_policy_risk
result_dict["dcn_pd_err_fact"] = dcn_pd_err_fact
result_dict["dcn_pd_05_ate_pred"] = dcn_pd_05_ate_pred
result_dict["dcn_pd_05_att_pred"] = dcn_pd_05_att_pred
result_dict["dcn_pd_05_bias_att"] = dcn_pd_05_bias_att
result_dict["dcn_pd_05_atc_pred"] = dcn_pd_05_atc_pred
result_dict["dcn_pd_05_policy_value"] = dcn_pd_05_policy_value
result_dict["dcn_pd_05_policy_risk"] = dcn_pd_05_policy_risk
result_dict["dcn_pd_05_err_fact"] = dcn_pd_05_err_fact
result_dict["dcn_pm_gan_ate_pred"] = dcn_pm_gan_ate_pred
result_dict["dcn_pm_gan_att_pred"] = dcn_pm_gan_att_pred
result_dict["dcn_pm_gan_bias_att"] = dcn_pm_gan_bias_att
result_dict["dcn_pm_gan_atc_pred"] = dcn_pm_gan_atc_pred
result_dict["dcn_pm_gan_policy_value"] = dcn_pm_gan_policy_value
result_dict["dcn_pm_gan_policy_risk"] = dcn_pm_gan_policy_risk
result_dict["dcn_pm_gan_err_fact"] = dcn_pm_gan_err_fact
result_dict["dcn_pm_gan_05_att_pred"] = dcn_pm_gan_05_ate_pred
result_dict["dcn_pm_gan_05_att_pred"] = dcn_pm_gan_05_att_pred
result_dict["dcn_pm_gan_05_bias_att"] = dcn_pm_gan_05_bias_att
result_dict["dcn_pm_gan_05_atc_pred"] = dcn_pm_gan_05_atc_pred
result_dict["dcn_pm_gan_05_policy_value"] = dcn_pm_gan_05_policy_value
result_dict["dcn_pm_gan_05_policy_risk"] = dcn_pm_gan_05_policy_risk
result_dict["dcn_pm_gan_05_err_fact"] = dcn_pm_gan_05_err_fact
result_dict["dcn_pm_gan_pd_att_pred"] = dcn_pm_gan_pd_ate_pred
result_dict["dcn_pm_gan_pd_att_pred"] = dcn_pm_gan_pd_att_pred
result_dict["dcn_pm_gan_pd_bias_att"] = dcn_pm_gan_pd_bias_att
result_dict["dcn_pm_gan_pd_atc_pred"] = dcn_pm_gan_pd_atc_pred
result_dict["dcn_pm_gan_pd_policy_value"] = dcn_pm_gan_pd_policy_value
result_dict["dcn_pm_gan_pd_policy_risk"] = dcn_pm_gan_pd_policy_risk
result_dict["dcn_pm_gan_pd_err_fact"] = dcn_pm_gan_pd_err_fact
result_dict["tarnet_ate_pred"] = tarnet_ate_pred
result_dict["tarnet_att_pred"] = tarnet_att_pred
result_dict["tarnet_bias_att"] = tarnet_bias_att
result_dict["tarnet_atc_pred"] = tarnet_atc_pred
result_dict["tarnet_policy_value"] = tarnet_policy_value
result_dict["tarnet_policy_risk"] = tarnet_policy_risk
result_dict["tarnet_err_fact"] = tarnet_err_fact
result_dict["tarnet_pm_gan_ate_pred"] = tarnet_pm_gan_ate_pred
result_dict["tarnet_pm_gan_att_pred"] = tarnet_pm_gan_att_pred
result_dict["tarnet_pm_gan_bias_att"] = tarnet_pm_gan_bias_att
result_dict["tarnet_pm_gan_atc_pred"] = tarnet_pm_gan_atc_pred
result_dict["tarnet_pm_gan_policy_value"] = tarnet_pm_gan_policy_value
result_dict["tarnet_pm_gan_policy_risk"] = tarnet_pm_gan_policy_risk
result_dict["tarnet_pm_gan_err_fact"] = tarnet_pm_gan_err_fact
file1.write("\nToday's date: {0}\n".format(date.today()))
file1.write("Iter: {0}, bias_att_DCN_PD: {1}, bias_att_DCN_PD(0.5): {2}, "
"bias_att_DCN_PM_GAN: {3}, "
"bias_att_DCN_PM_GAN_05: {4}, bias_att_DCN_PM_GAN(PD): {5}, "
"policy_risk_DCN_PD: {6}, "
"policy_risk_DCN_PD(0.5): {7}, policy_risk_DCN_PM_GAN: {8}, "
"policy_risk_PM_GAN_05: {9}, policy_risk_PM_GAN(PD): {10}, "
.format(iter_id, dcn_pd_bias_att,
dcn_pd_05_bias_att,
dcn_pm_gan_bias_att,
dcn_pm_gan_05_bias_att, dcn_pm_gan_pd_bias_att,
dcn_pd_policy_risk, dcn_pd_05_policy_risk,
dcn_pm_gan_policy_risk,
dcn_pm_gan_05_policy_risk,
dcn_pm_gan_pd_policy_risk))
results_list.append(result_dict)
bias_att_set_DCN_PD = []
policy_risk_set_DCN_PD = []
bias_att_set_DCN_PD_05 = []
policy_risk_set_DCN_PD_05 = []
bias_att_DCN_PM_GAN = []
policy_risk_set_DCN_PM_GAN = []
bias_att_DCN_PM_GAN_05 = []
policy_risk_set_DCN_PM_GAN_05 = []
bias_att_DCN_PM_GAN_PD = []
policy_risk_set_DCN_PM_GAN_PD = []
bias_att_tarnet = []
policy_risk_set_tarnet = []
bias_att_tarnet_PM_GAN = []
policy_risk_set_tarnet_PM_GAN = []
for result in results_list:
bias_att_set_DCN_PD.append(result["dcn_pd_bias_att"])
policy_risk_set_DCN_PD.append(result["dcn_pd_policy_risk"])
bias_att_set_DCN_PD_05.append(result["dcn_pd_05_bias_att"])
policy_risk_set_DCN_PD_05.append(result["dcn_pd_05_policy_risk"])
bias_att_DCN_PM_GAN.append(result["dcn_pm_gan_bias_att"])
policy_risk_set_DCN_PM_GAN.append(result["dcn_pm_gan_policy_risk"])
bias_att_DCN_PM_GAN_05.append(result["dcn_pm_gan_05_bias_att"])
policy_risk_set_DCN_PM_GAN_05.append(result["dcn_pm_gan_05_policy_risk"])
bias_att_DCN_PM_GAN_PD.append(result["dcn_pm_gan_pd_bias_att"])
policy_risk_set_DCN_PM_GAN_PD.append(result["dcn_pm_gan_pd_policy_risk"])
bias_att_tarnet.append(result["tarnet_bias_att"])
policy_risk_set_tarnet.append(result["tarnet_policy_risk"])
bias_att_tarnet_PM_GAN.append(result["tarnet_pm_gan_bias_att"])
policy_risk_set_tarnet_PM_GAN.append(result["tarnet_pm_gan_policy_risk"])
bias_att_DCN_PD_mean = np.mean(np.array(bias_att_set_DCN_PD))
bias_att_DCN_PD_std = np.std(bias_att_set_DCN_PD)
policy_risk_set_DCN_PD_mean = np.mean(np.array(policy_risk_set_DCN_PD))
policy_risk_set_DCN_PD_std = np.std(policy_risk_set_DCN_PD)
bias_att_DCN_PD_mean_05 = np.mean(np.array(bias_att_set_DCN_PD_05))
bias_att_DCN_PD_std_05 = np.std(bias_att_set_DCN_PD_05)
policy_risk_set_DCN_PD_mean_05 = np.mean(np.array(policy_risk_set_DCN_PD_05))
policy_risk_set_DCN_PD_std_05 = np.std(policy_risk_set_DCN_PD_05)
bias_att_DCN_PM_GAN_mean = np.mean(np.array(bias_att_DCN_PM_GAN))
bias_att_DCN_PM_GAN_std = np.std(bias_att_DCN_PM_GAN)
policy_risk_set_DCN_PM_GAN_mean = np.mean(np.array(policy_risk_set_DCN_PM_GAN))
policy_risk_set_DCN_PM_GAN_std = np.std(policy_risk_set_DCN_PM_GAN)
bias_att_DCN_PM_GAN_05_mean = np.mean(np.array(bias_att_DCN_PM_GAN_05))
bias_att_DCN_PM_GAN_05_std = np.std(bias_att_DCN_PM_GAN_05)
policy_risk_DCN_PM_GAN_05_mean = np.mean(np.array(policy_risk_set_DCN_PM_GAN_05))
policy_risk_DCN_PM_GAN_05_std = np.std(policy_risk_set_DCN_PM_GAN_05)
bias_att_DCN_PM_GAN_mean_PD = np.mean(np.array(bias_att_DCN_PM_GAN_PD))
bias_att_DCN_PM_GAN_std_PD = np.std(bias_att_DCN_PM_GAN_PD)
policy_risk_DCN_PM_GAN_mean_PD = np.mean(np.array(policy_risk_set_DCN_PM_GAN_PD))
policy_risk_DCN_PM_GAN_std_PD = np.std(policy_risk_set_DCN_PM_GAN_PD)
bias_att_tarnet_mean = np.mean(np.array(bias_att_tarnet))
bias_att_tarnet_std = np.std(bias_att_tarnet)
policy_risk_tarnet_mean = np.mean(np.array(policy_risk_set_tarnet))
policy_risk_tarnet_std = np.std(policy_risk_set_tarnet)
bias_att_tarnet_PM_GAN_mean = np.mean(np.array(bias_att_tarnet_PM_GAN))
bias_att_tarnet_PM_GAN_std = np.std(bias_att_tarnet_PM_GAN)
policy_risk_tarnet_PM_GAN_mean = np.mean(np.array(policy_risk_set_tarnet_PM_GAN))
policy_risk_tarnet_PM_GAN_std = np.std(policy_risk_set_tarnet_PM_GAN)
print("###" * 20)
print("----------------- !!DCN Models(Results) !! ------------------------")
print("--" * 20)
print("Model 1: DCN_PD")
print("DCN_PD, Bias: {0}, SD: {1}"
.format(bias_att_DCN_PD_mean, bias_att_DCN_PD_std))
print("DCN_PD, Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PD_mean, policy_risk_set_DCN_PD_std))
print("--" * 20)
print("Model 2: DCN_PD(0.5)")
print("DCN_PD(0.5), Bias: {0}, SD: {1}"
.format(bias_att_DCN_PD_mean_05, bias_att_DCN_PD_std_05))
print("DCN_PD(0.5), Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PD_mean_05, policy_risk_set_DCN_PD_std_05))
print("--" * 20)
print("Model 3: DCN PM GAN")
print("DCN PM GAN, Bias: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_mean, bias_att_DCN_PM_GAN_std))
print("DCN PM GAN, Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PM_GAN_mean, policy_risk_set_DCN_PM_GAN_std))
print("--" * 20)
print("Model 4: DCN PM GAN Dropout 0.5")
print("DCN PM GAN Dropout 0.5, Bias: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_05_mean, bias_att_DCN_PM_GAN_05_std))
print("DCN PM GAN Dropout 0.5, Policy Risk: {0}, SD: {1}"
.format(policy_risk_DCN_PM_GAN_05_mean, policy_risk_DCN_PM_GAN_05_std))
print("--" * 20)
print("Model 5: DCN PM GAN(PD)")
print("DCN PM GAN(PD), Bias: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_mean_PD, bias_att_DCN_PM_GAN_std_PD))
print("DCN PM GAN(PD), Policy Risk: {0}, SD: {1}"
.format(policy_risk_DCN_PM_GAN_mean_PD, policy_risk_DCN_PM_GAN_std_PD))
print("--" * 20)
print("###" * 20)
print("----------------- !!TARNet Models(Results) !! ------------------------")
print("--" * 20)
print("Model 1: TARNET")
print("TARNET, Bias: {0}, SD: {1}"
.format(bias_att_tarnet_mean, bias_att_tarnet_std))
print("TARNET, Policy Risk: {0}, SD: {1}"
.format(policy_risk_tarnet_mean, policy_risk_tarnet_std))
print("--" * 20)
print("Model 2: TARNET PM GAN")
print("TARNET PM GAN, Bias: {0}, SD: {1}"
.format(bias_att_tarnet_PM_GAN_mean, bias_att_tarnet_PM_GAN_std))
print("TARNET PM GAN, Policy Risk: {0}, SD: {1}"
.format(policy_risk_tarnet_PM_GAN_mean, policy_risk_tarnet_PM_GAN_std))
print("--" * 3)
print("###" * 3)
file1.write("\n########")
file1.write("\nDCN Models")
file1.write("\n------")
file1.write("\nModel 1: DCN_PD")
file1.write("\nDCN_PD, Bias att: {0}, SD: {1}"
.format(bias_att_DCN_PD_mean, bias_att_DCN_PD_std))
file1.write("\nDCN_PD, Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PD_mean,
policy_risk_set_DCN_PD_std))
file1.write("\n------")
file1.write("\nModel 2: DCN_PD(0.5)")
file1.write("\nDCN_PD(0.5), Bias att: {0}, SD: {1}"
.format(bias_att_DCN_PD_mean_05, bias_att_DCN_PD_std_05))
file1.write("\nDCN_PD(0.5), Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PD_mean_05,
policy_risk_set_DCN_PD_std_05))
file1.write("\n------")
file1.write("\nModel 3: DCN PM GAN")
file1.write("\nDCN PM GAN, Bias att: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_mean, bias_att_DCN_PM_GAN_std))
file1.write("\nDCN PM GAN, Policy Risk: {0}, SD: {1}"
.format(policy_risk_set_DCN_PM_GAN_mean,
policy_risk_set_DCN_PM_GAN_std))
file1.write("\n------")
file1.write("\nModel 4: DCN PM GAN Dropout 0.5")
file1.write("\nDCN PM GAN Dropout 0.5, Bias att: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_05_mean, bias_att_DCN_PM_GAN_05_std))
file1.write("\nDCN PM GAN Dropout 0.5, Policy Risk: {0}, SD: {1}"
.format(policy_risk_DCN_PM_GAN_05_mean,
policy_risk_DCN_PM_GAN_05_std))
file1.write("\n------")
file1.write("\nModel 5: DCN PM GAN PD")
file1.write("\nDCN PM GAN Dropout PD, Bias att: {0}, SD: {1}"
.format(bias_att_DCN_PM_GAN_mean_PD, bias_att_DCN_PM_GAN_std_PD))
file1.write("\nDCN PM GAN Dropout PD, Policy Risk: {0}, SD: {1}"
.format(policy_risk_DCN_PM_GAN_mean_PD,
policy_risk_DCN_PM_GAN_std_PD))
file1.write("\n------")
file1.write("\n########")
file1.write("\nTARNET Models")
file1.write("\n------")
file1.write("\nModel 1: TARNET")
file1.write("\nTARNET, Bias att: {0}, SD: {1}"
.format(bias_att_tarnet_mean, bias_att_tarnet_std))
file1.write("\nTARNET, Policy Risk: {0}, SD: {1}"
.format(policy_risk_tarnet_mean,
policy_risk_tarnet_std))
file1.write("\n------")
file1.write("\nModel 2: TARNET PM GAN")
file1.write("\nTARNET PM GAN, Bias att: {0}, SD: {1}"
.format(bias_att_tarnet_PM_GAN_mean, bias_att_tarnet_PM_GAN_std))
file1.write("\nTARNET PM GAN, Policy Risk: {0}, SD: {1}"
.format(policy_risk_tarnet_PM_GAN_mean,
policy_risk_tarnet_PM_GAN_std))
file1.write("\n--" * 3)
file1.write("\n###" * 3)
Utils.write_to_csv(run_parameters["consolidated_file_path"], results_list)
def __get_run_parameters(self):
run_parameters = {}
if self.running_mode == "original_data":
run_parameters["input_nodes"] = 17
# run_parameters["consolidated_file_path"] = "./MSE/Results_consolidated.csv"
# NN
run_parameters["nn_prop_file"] = "./MSE/NN_Prop_score_{0}.csv"
# ite files DCN
run_parameters["DCN_PD"] = "./MSE/ITE/ITE_DCN_PD_iter_{0}.csv"
run_parameters["DCN_PD_02"] = "./MSE/ITE/ITE_DCN_PD_02_iter_{0}.csv"
run_parameters["DCN_PD_05"] = "./MSE/ITE/ITE_DCN_PD_05_iter_{0}.csv"
run_parameters["DCN_PM_GAN"] = "./MSE/ITE/ITE_DCN_PM_GAN_iter_{0}.csv"
run_parameters["DCN_PM_GAN_02"] = "./MSE/ITE/ITE_DCN_PM_GAN_dropout_02_iter_{0}.csv"
run_parameters["DCN_PM_GAN_05"] = "./MSE/ITE/ITE_DCN_PM_GAN_dropout_05_iter_{0}.csv"
run_parameters["DCN_PM_GAN_PD"] = "./MSE/ITE/ITE_DCN_PM_GAN_dropout_PD_iter_{0}.csv"
# model paths DCN
run_parameters["Model_DCN_PD_shared"] = "./Models/DCN_PD/DCN_PD_shared_iter_{0}.pth"
run_parameters["Model_DCN_PD_y1"] = "./Models/DCN_PD/DCN_PD_y1_iter_{0}.pth"
run_parameters["Model_DCN_PD_y0"] = "./Models/DCN_PD/DCN_PD_y2_iter_{0}.pth"
run_parameters["Model_DCN_PD_02_shared"] = "./Models/DCN_PD_02/DCN_PD_02_shared_iter_{0}.pth"
run_parameters["Model_DCN_PD_02_y1"] = "./Models/DCN_PD_02/DCN_PD_02_y1_iter_{0}.pth"
run_parameters["Model_DCN_PD_02_y0"] = "./Models/DCN_PD_02/DCN_PD_02_y2_iter_{0}.pth"
run_parameters["Model_DCN_PD_05_shared"] = "./Models/DCN_PD_05/DCN_PD_05_shared_iter_{0}.pth"
run_parameters["Model_DCN_PD_05_y1"] = "./Models/DCN_PD_05/DCN_PD_05_y1_iter_{0}.pth"
run_parameters["Model_DCN_PD_05_y0"] = "./Models/DCN_PD_05/DCN_PD_05_y2_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_shared"] = "./Models/PM_GAN/DCN_PM_GAN_shared_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_y1"] = "./Models/PM_GAN/DCN_PM_GAN_iter_y1_{0}.pth"
run_parameters["Model_DCN_PM_GAN_y0"] = "./Models/PM_GAN/DCN_PM_GAN_iter_y0_{0}.pth"
run_parameters[
"Model_DCN_PM_GAN_02_shared"] = "./Models/PM_GAN_DR_02/DCN_PM_GAN_dropout_02_shared_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_02_y1"] = "./Models/PM_GAN_DR_02/DCN_PM_GAN_dropout_02_y1_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_02_y0"] = "./Models/PM_GAN_DR_02/DCN_PM_GAN_dropout_02_y0_iter_{0}.pth"
run_parameters[
"Model_DCN_PM_GAN_05_shared"] = "./Models/PM_GAN_DR_05/DCN_PM_GAN_dropout_05_shared_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_05_y1"] = "./Models/PM_GAN_DR_05/DCN_PM_GAN_dropout_05_y1_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_05_y0"] = "./Models/PM_GAN_DR_05/DCN_PM_GAN_dropout_05_y0_iter_{0}.pth"
run_parameters[
"Model_DCN_PM_GAN_PD_shared"] = "./Models/PM_GAN_PD/DCN_PM_GAN_dropout_PD_shared_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_PD_y1"] = "./Models/PM_GAN_PD/DCN_PM_GAN_dropout_PD_y1_iter_{0}.pth"
run_parameters["Model_DCN_PM_GAN_PD_y0"] = "./Models/PM_GAN_PD/DCN_PM_GAN_dropout_PD_y0_iter_{0}.pth"
run_parameters["TARNET"] = "./MSE/ITE/ITE_TARNET_iter_{0}.csv"
run_parameters["TARNET_PM_GAN"] = "./MSE/ITE/ITE_TARNET_PM_GAN_iter_{0}.csv"
run_parameters["summary_file_name"] = "Details_original.txt"
run_parameters["is_synthetic"] = False
elif self.running_mode == "synthetic_data":
run_parameters["input_nodes"] = 75
run_parameters["consolidated_file_path"] = "./MSE_Augmented/Results_consolidated.csv"
run_parameters["is_synthetic"] = True
return run_parameters
def __load_data(self, train_path, test_path, iter_id):
if self.running_mode == "original_data":
return self.dL.load_train_test_jobs(train_path, test_path, iter_id)
elif self.running_mode == "synthetic_data":
return self.dL.load_train_test_jobs(train_path, test_path, iter_id)
def __get_ps_model(self, ps_model_type, iter_id,
input_nodes, device):
ps_train_set = self.dL.convert_to_tensor(self.np_covariates_X_train, self.np_covariates_T_train)
ps_val_set = self.dL.convert_to_tensor(self.np_covariates_X_val,
self.np_covariates_T_val)
ps_test_set = self.dL.convert_to_tensor(self.np_covariates_X_test,
self.np_covariates_T_test)
ps_manager = PS_Manager()
if ps_model_type == Constants.PS_MODEL_NN:
return ps_manager.get_propensity_scores(ps_train_set,
ps_val_set,
ps_test_set, iter_id,
input_nodes, device)
elif ps_model_type == Constants.PS_MODEL_LR:
return ps_manager.get_propensity_scores_using_LR(self.np_covariates_X_train,
self.np_covariates_T_train,
self.np_covariates_X_val,
self.np_covariates_X_test,
regularized=False)
elif ps_model_type == Constants.PS_MODEL_LR_Lasso:
return ps_manager.get_propensity_scores_using_LR(self.np_covariates_X_train,
self.np_covariates_T_train,
self.np_covariates_X_val,
self.np_covariates_X_test,
regularized=True)
@staticmethod
def cal_policy_val(t, yf, eff_pred):
# policy_val(t[e>0], yf[e>0], eff_pred[e>0], compute_policy_curve)
if np.any(np.isnan(eff_pred)):
return np.nan
policy = eff_pred > 0
treat_overlap = (policy == t) * (t > 0)
control_overlap = (policy == t) * (t < 1)
if np.sum(treat_overlap) == 0:
treat_value = 0
else:
treat_value = np.mean(yf[treat_overlap])
if np.sum(control_overlap) == 0:
control_value = 0
else:
control_value = np.mean(yf[control_overlap])
pit = np.mean(policy)
policy_value = pit * treat_value + (1 - pit) * control_value
return policy_value
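# Illustrative worked example (hypothetical numbers, not taken from the experiments above):
# with t = [1, 0, 1], yf = [1, 0, 0] and eff_pred = [0.2, -0.1, 0.3], the policy is
# [True, False, True]; treat_value = mean(yf[policy & (t > 0)]) = 0.5, control_value =
# mean(yf[~policy & (t < 1)]) = 0.0 and pit = 2/3, so the returned policy value is
# 2/3 * 0.5 + 1/3 * 0.0 = 0.333.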
def __process_evaluated_metric(self, y_f, e, T,
y1_hat, y0_hat,
ite_dict, predicted_ITE_list,
ite_csv_path,
iter_id):
y1_hat_np = np.array(y1_hat)
y0_hat_np = np.array(y0_hat)
e_np = np.array(e)
t_np = np.array(T)
np_y_f = np.array(y_f)
y1_hat_np_b = 1.0 * (y1_hat_np > 0.5)
y0_hat_np_b = 1.0 * (y0_hat_np > 0.5)
err_fact = np.mean(np.abs(y1_hat_np_b - np_y_f))
att = np.mean(np_y_f[t_np > 0]) - np.mean(np_y_f[(1 - t_np + e_np) > 1])
eff_pred = y0_hat_np - y1_hat_np
eff_pred[t_np > 0] = -eff_pred[t_np > 0]
ate_pred = np.mean(eff_pred[e_np > 0])
atc_pred = np.mean(eff_pred[(1 - t_np + e_np) > 1])
att_pred = np.mean(eff_pred[(t_np + e_np) > 1])
bias_att = np.abs(att_pred - att)
from graphmix import Graph
import numpy as np
import scipy.sparse as sp
import os
import sys
import math
import argparse
import pickle as pkl
import networkx as nx
'''
Usage example: (in Dir Hetu/)
python ./tests/test_DistGCN/prepare_data_GCN15d.py --size 8 --replication 2 --dataset Reddit
'''
def coo_slice(a, row_range, col_range):
a = a.tocoo()
condition = np.where((a.row >= row_range[0]) & (a.row < row_range[1]) & (
a.col >= col_range[0]) & (a.col < col_range[1]))
return sp.coo_matrix((a.data[condition], (a.row[condition]-row_range[0], a.col[condition]-col_range[0])), shape=(row_range[1]-row_range[0], col_range[1]-col_range[0]))
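# Illustrative usage (hypothetical matrix): extract the top-left 2x2 block of a 4x4 matrix.
# m = sp.coo_matrix(np.arange(16).reshape(4, 4))
# block = coo_slice(m, row_range=(0, 2), col_range=(0, 2))  # coo_matrix of shape (2, 2)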
def get_adj_matrix_all(A, replication, size, dir_name):
node_count = A.shape[0]
n_per_proc = math.ceil(float(node_count) / (size // replication))
stages = size // (replication ** 2)
col_block = stages*n_per_proc
row_block = math.ceil(float(node_count)/(size//replication))
for rank in range(size):
rank_row = rank // replication # i
rank_col = rank % replication # j
col_start = int(col_block*rank_col)
col_end = int(col_block*(rank_col+1))
if col_end > node_count:
col_end = node_count
row_start = int(row_block*rank_row)
row_end = int(row_block*(rank_row+1))
if row_end > node_count:
row_end = node_count
a = coo_slice(A.tocoo(), row_range=(row_start, row_end),
col_range=(col_start, col_end))
sp.save_npz(dir_name+"adj_part"+str(rank)+".npz", a)
print("adj_part: rank = %d" % rank, a.shape, len(a.data))
def get_inputs(H, replication, rank, size):
node_count = H.shape[0]
rank_row = rank // replication # i
row_block = math.ceil(float(node_count)/(size//replication))
row_start = int(row_block*rank_row)
row_end = int(row_block*(rank_row+1))
if row_end > node_count:
row_end = node_count
h = H[row_start:row_end, :]
print("inputs_part: rank = %d" % rank, h.shape)
return h
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
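# Illustrative example: each row of the result sums to 1, and all-zero rows stay zero.
# m = sp.csr_matrix(np.array([[1., 3.], [0., 0.]]))
# normalize(m).toarray()  # -> [[0.25, 0.75], [0., 0.]]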
def check_sparsity(adj):
if args.size == -1:
return
adj = adj.tocoo()
node_count = adj.shape[0]
block_num = args.size//args.replication
p = math.ceil(float(node_count)/(args.size//args.replication))
starts = list(range(0, node_count, p))
ends = list(range(p, node_count, p))+[node_count]
sparsity = np.zeros(shape=(block_num, block_num), dtype=int)
for i in range(block_num):
for j in range(block_num):
sparsity[i, j] = np.where((adj.row >= starts[i]) & (adj.row < ends[i]) & (
adj.col >= starts[j]) & (adj.col < ends[j]))[0].shape[0]
print(sparsity)
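# Illustrative reading of the output: for --size 8 --replication 2 the node range is cut
# into 4 row/column blocks, and the printed 4 x 4 matrix gives the number of non-zeros
# falling into each block.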
def load_data(args):
dataset = args.dataset
data_dir = './tests/test_DistGCN/datasets/%s/' % dataset
# ---load data---
if dataset == "Reddit":
adj = sp.load_npz(data_dir+'raw/reddit_graph.npz')
inputs = np.load(data_dir+'raw/reddit_data.npz')
x, y = inputs['feature'], inputs['label']
elif dataset == 'Proteins':
adj = sp.load_npz(data_dir+'protein_adj.npz')
y = np.load(data_dir+'protein_labels.npy')
y = y.astype(int)
np.random.seed(123)
bounds = np.sqrt(6.0 / (132534 + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[132534, 602]).astype(np.float32)
elif dataset == 'Arch':
adj = sp.load_npz(data_dir+'arch_adj.npz')
y = np.random.randint(10, size=adj.shape[0])
np.random.seed(123)
bounds = np.sqrt(6.0 / (adj.shape[0] + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[adj.shape[0], 602]).astype(np.float32)
elif dataset == 'Products':
adj = sp.load_npz(data_dir+'products_adj.npz')
x = np.load(data_dir+'products_feat.npy')
y = np.load(data_dir+'products_label.npy').astype(int)
elif dataset == 'Youtube':
adj = np.load(data_dir+'youtube_coo.npy', allow_pickle=True).item()
np.random.seed(123)
bounds = np.sqrt(6.0 / (adj.shape[0] + 602))
x = np.random.uniform(low=-bounds, high=bounds,
size=[adj.shape[0], 602]).astype(np.float32)
y = np.load(data_dir+'youtube_label.npy')
graph = Graph(edge_index=np.vstack(
[adj.row, adj.col]), num_nodes=x.shape[0])
# ---preprocess graph---
graph.add_self_loop()
normed_val = graph.gcn_norm(True)
node_count = graph.num_nodes
# ---construct adj,x,y---
edge_index = graph.edge_index
adj = sp.coo_matrix(
(normed_val, (edge_index[0], edge_index[1])), shape=(node_count, node_count))
# ---check block sparsity---
print('Sparsity before reordering:')
check_sparsity(adj)
if args.shuffle == 1:
print("Shuffle the graph...")
order = np.random.permutation(node_count)
adj = adj.tocsr()[:, order][order]
x = x[order, :]
y = y[order]
print('Sparsity after Shuffle:')
check_sparsity(adj)
print('node_count = %d, num_features = %d, num_classes = %d, edge_count = %d' % (
adj.shape[0], x.shape[1], np.max(y)+1, len(adj.data)))
return adj, x, y
def prepare_data(args, prepare_all_data=False):
dataset, replication, size = args.dataset, args.replication, args.size
print("Preparing data...")
adj_all, input_all, label_all = load_data(args)
if prepare_all_data:
size_set = [1, 2, 4, 8, 4, 8]
replication_set = [1, 1, 1, 1, 2, 2]
else:
size_set = [size]
replication_set = [replication]
for i in range(len(size_set)):
replication, size = replication_set[i], size_set[i]
print("size=%d, replication=%s, dataset=%s" %
(size, replication, dataset))
if size == 1: # whole graph for single GPU
replication = 1
dir_name = "./tests/test_DistGCN/data_GCN15d/%s_whole_graph/" % dataset
if not os.path.exists(dir_name):
os.makedirs(dir_name)
adj_all = adj_all.tocoo()
sp.save_npz(dir_name+"adj_whole.npz", adj_all)
print("adj_whole: ", adj_all.shape, len(adj_all.data))
np.save(dir_name+"input_whole.npy", input_all)
"""
利用 SMO 算法实现的线性 SVM 和非线性 SVM.
"""
import numpy as np
from functools import partial
class LinearSVC(object):
def __init__(self, C=0.6, tol=1e-3, max_iter=50):
self.C = C
self.tol = tol
self.max_iter = max_iter
self.err_cache = None
self.alphas = None
self.intercept_ = 0.0
def fit(self, X, y):
self.X = X
self.y = y
self.alphas = np.zeros(X.shape[0])
self.err_cache = np.zeros((X.shape[0], 2))
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
UCCSD analytical nuclear gradients
'''
import time
import ctypes
import numpy
from pyscf import lib
from functools import reduce
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import _ccsd
from pyscf.cc import uccsd_rdm
from pyscf.scf import ucphf
from pyscf.grad import rhf as rhf_grad
from pyscf.grad import ccsd as ccsd_grad
#
# Note: only works with canonical orbitals
# Non-canonical formula refers to JCP, 95, 2639
#
def kernel(mycc, t1=None, t2=None, l1=None, l2=None, eris=None, atmlst=None,
mf_grad=None, d1=None, d2=None, verbose=logger.INFO):
if eris is not None:
if (abs(eris.focka - numpy.diag(eris.focka.diagonal())).max() > 1e-3 or
abs(eris.fockb - numpy.diag(eris.fockb.diagonal())).max() > 1e-3):
raise RuntimeError('UCCSD gradients does not support NHF (non-canonical HF)')
if t1 is None: t1 = mycc.t1
if t2 is None: t2 = mycc.t2
if l1 is None: l1 = mycc.l1
if l2 is None: l2 = mycc.l2
if mf_grad is None: mf_grad = mycc._scf.nuc_grad_method()
log = logger.new_logger(mycc, verbose)
time0 = time.clock(), time.time()
log.debug('Build uccsd rdm1 intermediates')
if d1 is None:
d1 = uccsd_rdm._gamma1_intermediates(mycc, t1, t2, l1, l2)
time1 = log.timer_debug1('rdm1 intermediates', *time0)
log.debug('Build uccsd rdm2 intermediates')
fdm2 = lib.H5TmpFile()
if d2 is None:
d2 = uccsd_rdm._gamma2_outcore(mycc, t1, t2, l1, l2, fdm2, True)
time1 = log.timer_debug1('rdm2 intermediates', *time1)
mol = mycc.mol
mo_a, mo_b = mycc.mo_coeff
mo_ea, mo_eb = mycc._scf.mo_energy
nao, nmoa = mo_a.shape
nmob = mo_b.shape[1]
nocca = numpy.count_nonzero(mycc.mo_occ[0] > 0)
noccb = numpy.count_nonzero(mycc.mo_occ[1] > 0)
nvira = nmoa - nocca
nvirb = nmob - noccb
with_frozen = not (mycc.frozen is None or mycc.frozen == 0)
moidx = mycc.get_frozen_mask()
OA_a, VA_a, OF_a, VF_a = ccsd_grad._index_frozen_active(moidx[0], mycc.mo_occ[0])
OA_b, VA_b, OF_b, VF_b = ccsd_grad._index_frozen_active(moidx[1], mycc.mo_occ[1])
log.debug('symmetrized rdm2 and MO->AO transformation')
# Roughly, dm2*2 is computed in _rdm2_mo2ao
mo_active = (mo_a[:,numpy.hstack((OA_a,VA_a))],
mo_b[:,numpy.hstack((OA_b,VA_b))])
_rdm2_mo2ao(mycc, d2, mo_active, fdm2) # transform the active orbitals
time1 = log.timer_debug1('MO->AO transformation', *time1)
hf_dm1a, hf_dm1b = mycc._scf.make_rdm1(mycc.mo_coeff, mycc.mo_occ)
hf_dm1 = hf_dm1a + hf_dm1b
if atmlst is None:
atmlst = range(mol.natm)
offsetdic = mol.offset_nr_by_atom()
diagidx = numpy.arange(nao)
diagidx = diagidx*(diagidx+1)//2 + diagidx
de = numpy.zeros((len(atmlst),3))
Imata = numpy.zeros((nao,nao))
Imatb = numpy.zeros((nao,nao))
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import re
import os
from train_until import *
import spacy
import tensorflow as tf
from dnc.memory import KMemory
nlp = spacy.load('en')
anno_file = './dataset/MSR_en.csv'
dict_file = './dataset/MSR_en_dict.csv'
w2v_dict_file = './dataset/MSR_enW2V_dict.csv'
video_dir = './dataset/YouTubeClips/'
word2v_emb_file = './dataset/MSR_enW2V.npy'
def get_dataset(path='./dataset/'):
feat_files = [re.match(r'features_(\d+)_(\d+)\.npy', f) for f in os.listdir(path=path)]
feat_files_tup = []
for f in feat_files:
if f is not None:
feat_files_tup.append((
os.path.join(path, f.string),
int(f.group(1)),
int(f.group(2))
)) # (file_name, start_id, end_id)
feat_files_tup.sort(key=lambda x: x[1]) # sort by start data id.
return feat_files_tup
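# Illustrative example: a file named 'features_0_99.npy' inside `path` yields the tuple
# ('./dataset/features_0_99.npy', 0, 99) after sorting by the start id.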
def plotVideo(feat):
def fun(x, y, arr):
if y < arr.shape[0] and x < arr.shape[1]:
return arr[y, x]
return 0
feat = feat.tolist()
# increase number of frame to 10 time, so we can see it better on plot.
feat10 = []
for f in feat:
for _ in range(5):
feat10.append(f)
feat10 = np.array(feat10)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = np.arange(0, feat10.shape[1], 1) # feat size
y = np.arange(0, feat10.shape[0], 1) # feat num
X, Y = np.meshgrid(x, y)
zs = np.array([fun(x, y, feat10) for x, y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
ax.plot_surface(X, Y, Z)
# ax.plot_wireframe(X, Y, Z)
ax.set_xlabel('FC6 Feature')
ax.set_ylabel('Video Frame')
ax.set_zlabel('Feature Value')
# fig.imshow(feat10)
plt.show()
def onehot_vec2id(nparr):
result = []
for batch in nparr:
batch_vec = []
for step in batch:
batch_vec.append(step.argmax())
result.append(batch_vec)
return np.array(result)
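# Illustrative example: onehot_vec2id(np.array([[[0, 1, 0], [1, 0, 0]]])) -> array([[1, 0]])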
data, lexicon_dict = load(anno_file, w2v_dict_file)
dataset = get_dataset()
d100 = np.load(dataset[0][0]).tolist()
k100 = list(d100.keys())
v_feat_means = []
feat_dic = {'some_word': ['CNN feature']}
for key in k100:
v_feature = d100[key]
if type(v_feature) is list:
v_feature = np.array(v_feature)
if len(v_feature.shape) < 2:
continue
v_feat_means.append(v_feature.mean(axis=0))
# for anno in data[10:40]:
# output_str = [str(i) for i in nlp(anno['Description'][:-5])]
# print(output_str)
# input('waiting...')
v_feat_means = np.array(v_feat_means)
import numpy as np
from astropy.convolution import convolve
def smooth(x, window_len=9, window='hanning'):
"""
Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
Parameters
----------
x : array_like
the input signal
window_len : int
The length of the smoothing window
window : str
The type of window from 'flat', 'hanning', 'hamming', 'bartlett',
'blackman'
'flat' window will produce a moving average smoothing.
Returns
-------
out : The smoothed signal
Example
-------
>>> t = np.linspace(-2, 2, 50)
>>> x = np.sin(t) + np.random.randn(len(t)) * 0.1
>>> y = smooth(x)
See Also
--------
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array
instead of a string
"""
if isinstance(x, list):
x = np.array(x)
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if len(x) < window_len:
print("length of x: ", len(x))
print("window_len: ", window_len)
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if (window_len % 2) == 0:
window_len = window_len + 1
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError(
"Window should be one of 'flat', 'hanning', "
"'hamming', 'bartlett', 'blackman'")
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)
y = convolve(x, w/w.sum(), normalize_kernel=False, boundary='extend')
# return the smoothed signal
return y
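# Illustrative usage (hypothetical signal):
# t = np.linspace(-2, 2, 200)
# noisy = np.sin(t) + np.random.randn(len(t)) * 0.1
# smoothed = smooth(noisy, window_len=11, window='hanning')  # same length as `noisy`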
def blurImage(im, n, ny=None, ftype='boxcar'):
"""
Smooths a 2D image by convolving with a filter
Parameters
----------
im : array_like
The array to smooth
n, ny : int
The size of the smoothing kernel
ftype : str
The type of smoothing kernel. Either 'boxcar' or 'gaussian'
Returns
-------
res: array_like
The smoothed vector with shape the same as im
"""
from scipy import signal
n = int(n)
if not ny:
ny = n
else:
ny = int(ny)
# keep track of nans
nan_idx = np.isnan(im)
im[nan_idx] = 0
g = signal.boxcar(n) / float(n)
if 'box' in ftype:
if im.ndim == 1:
g = signal.boxcar(n) / float(n)
elif im.ndim == 2:
g = signal.boxcar(n) / float(n)
g = np.tile(g, (1, ny, 1))
g = g / g.sum()
g = np.squeeze(g) # extra dim introduced in np.tile above
elif im.ndim == 3: # mutlidimensional binning
g = signal.boxcar(n) / float(n)
g = np.tile(g, (1, ny, 1))
g = g / g.sum()
elif 'gaussian' in ftype:
x, y = np.mgrid[-n:n+1, 0-ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
if np.ndim(im) == 1:
g = g[n, :]
if np.ndim(im) == 3:
g = np.tile(g, (1, ny, 1))
improc = signal.convolve(im, g, mode='same')
improc[nan_idx] = np.nan
return improc
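# Illustrative usage (hypothetical image): blurImage(np.random.rand(64, 64), 5, ftype='gaussian')
# returns a 64 x 64 array smoothed with a 2-D Gaussian kernel; NaN pixels are put back as NaN.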
def count_to(n):
"""By example:
# 0 1 2 3 4 5 6 7 8
n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
res = [0, 1, 2, 0, 1, 0, 1, 0]
That is, it is equivalent to something like this :
hstack((arange(n_i) for n_i in n))
This version seems quite a bit faster, at least for some
possible inputs, and at any rate it encapsulates a task
in a function.
"""
if n.ndim != 1:
raise Exception("n is supposed to be 1d array.")
n_mask = n.astype(bool)
n_cumsum = np.cumsum(n)
ret = np.ones(n_cumsum[-1]+1, dtype=int)
ret[n_cumsum[n_mask]] -= n[n_mask]
ret[0] -= 1
return np.cumsum(ret)[:-1]
def repeat_ind(n: np.array):
"""
Examples
--------
>>> n = [0, 0, 3, 0, 0, 2, 0, 2, 1]
>>> res = repeat_ind(n)
>>> res = [2, 2, 2, 5, 5, 7, 7, 8]
That is the input specifies how many times to repeat the given index.
It is equivalent to something like this :
hstack((zeros(n_i,dtype=int)+i for i, n_i in enumerate(n)))
But this version seems to be faster, and probably scales better, at
any rate it encapsulates a task in a function.
"""
if n.ndim != 1:
raise Exception("n is supposed to be 1d array.")
res = [[idx]*a for idx, a in enumerate(n) if a != 0]
return np.concatenate(res)
def rect(r, w, deg=False):
"""
Convert from polar (r,w) to rectangular (x,y)
x = r cos(w)
y = r sin(w)
"""
# radian if deg=0; degree if deg=1
if deg:
w = np.pi * w / 180.0
return r * np.cos(w), r * np.sin(w)
def polar(x, y, deg=False):
"""
Converts from rectangular coordinates to polar ones
Parameters
----------
x, y : array_like, list_like
The x and y coordinates
deg : int
radian if deg=0; degree if deg=1
Returns
-------
p : array_like
The polar version of x and y
"""
if deg:
return np.hypot(x, y), 180.0 * np.arctan2(y, x) / np.pi
else:
return np.hypot(x, y), np.arctan2(y, x)
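# Illustrative round trip: r, w = polar(3.0, 4.0) gives r = 5.0 and w = atan2(4, 3);
# rect(r, w) then returns approximately (3.0, 4.0).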
def bwperim(bw, n=4):
"""
perim = bwperim(bw, n=4)
Find the perimeter of objects in binary images.
A pixel is part of an object perimeter if its value is one and there
is at least one zero-valued pixel in its neighborhood.
By default the neighborhood of a pixel is 4 nearest pixels, but
if `n` is set to 8 the 8 nearest pixels will be considered.
Parameters
----------
bw : A black-and-white image
n : Connectivity. Must be 4 or 8 (default: 4)
Returns
-------
perim : A boolean image
"""
if n not in (4, 8):
raise ValueError('mahotas.bwperim: n must be 4 or 8')
rows, cols = bw.shape
# Translate image by one pixel in all directions
north = np.zeros((rows, cols))
south = np.zeros((rows, cols))
west = np.zeros((rows, cols))
east = np.zeros((rows, cols))
north[:-1, :] = bw[1:, :]
south[1:, :] = bw[:-1, :]
west[:, :-1] = bw[:, 1:]
east[:, 1:] = bw[:, :-1]
idx = (north == bw) & \
(south == bw) & \
(west == bw) & \
(east == bw)
if n == 8:
north_east = np.zeros((rows, cols))
north_west = np.zeros((rows, cols))
south_east = np.zeros((rows, cols))
south_west = np.zeros((rows, cols))
# routines for comparing gravities with asteroseismic sample
from apogee.utils import apload
from apogee.utils import apselect
from astropy.io import fits
#from holtz.gal import isochrones
#from holtz.gal import stars
from tools import match
from tools import plots
from tools import fit
from apogee.utils import bitmask
try: from apogee.aspcap import cal
except: pass
from apogee.aspcap import err
from apogee.speclib import isochrones
import pdb
import matplotlib.pyplot as plt
import numpy as np
import os
import astropy
from scipy import interpolate
def rcrgb(allstar,apokasc='APOKASC_cat_v3.6.0.fits',logg='LOGG_SYD_SCALING',rclim=np.array([2.38,3.5]),out='rcrgbsep') :
'''
asteroseismic log g comparisons for input allStar structure
'''
gd=apselect.select(allstar,badval=['STAR_BAD'],logg=[0,3.8],teff=[3500,5500],raw=True)
allstar=allstar[gd]
# match ASPCAP with APOKASC, and get RC/RGB stars
apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/'+apokasc)[1].data
# strip off .XXXX if we have it, e.g. from calibration fields where we have added .FIELD
apogee_id = np.array(np.core.defchararray.split(allstar['APOGEE_ID'],'.').tolist())[:,0]
i1,i2=match.match(apogee_id,apokasc['2MASS_ID'])
rgb=np.where(apokasc['CONS_EVSTATES'][i2] == 'RGB')[0]
rc=np.where(apokasc['CONS_EVSTATES'][i2] == 'RC')[0]
notrc=np.where(apokasc['CONS_EVSTATES'][i2] != 'RC')[0]
rc2=np.where((apokasc['CONS_EVSTATES'][i2] == '2CL') & (apokasc[logg][i2] > -1))[0]
rc2cl=np.where((apokasc['CONS_EVSTATES'][i2] == 'RC/2CL') & (apokasc[logg][i2] > -1))[0]
rcall=np.append(rc,rc2)
rcall=np.append(rcall,rc2cl)
rc=i1[rc]
rgb=i1[rgb]
rc2=i1[rc2]
rc2cl=i1[rc2cl]
rcall=i1[rcall]
# 2D fit for RGB Teff as f(log g, [M/H])
fig,ax=plots.multi(3,2,figsize=(12,8))
fit2d=fit.fit2d(allstar['FPARAM'][rgb,1]-2.5,allstar['FPARAM'][rgb,3],allstar['FPARAM'][rgb,0],plot=ax[0,0],xt='log g - 2.5',yt='[M/H]',zt='Teff')
rgbfit=fit2d.parameters
#histogram of RC logg values
ax[1,0].hist(allstar['FPARAM'][rc,1],color='b',bins=np.arange(0,5,0.05),log=True)
ax[1,0].set_xlim(1.5,3.5)
ax[1,0].set_ylim(0.1,1000)
ax[1,0].set_xlabel('log g')
#ax[1,0].hist(allstar['FPARAM'][rgb,1],color='r',bins=np.arange(0,5,0.05))
print('RC min log g: ',allstar['FPARAM'][rc,1].min())
print('RC max log g: ',allstar['FPARAM'][rc,1].max())
# limit log g range for RC
plots.plotl(ax[1,0],[rclim[0],rclim[0]],[0,1000],color='k')
plots.plotl(ax[1,0],[rclim[1],rclim[1]],[0,1000],color='k')
rclogg = np.where((allstar['FPARAM'][rc,1] > rclim[0]) & (allstar['FPARAM'][rc,1]<rclim[1]))[0]
rgblogg = np.where((allstar['FPARAM'][rgb,1] > rclim[0]) & (allstar['FPARAM'][rgb,1]<rclim[1]))[0]
dt=allstar['FPARAM'][:,0]-fit2d(allstar['FPARAM'][:,1]-2.5,allstar['FPARAM'][:,3])
nbest=10000
for dtcrit in range(-500,500) :
rcbd = np.where((dt[rc[rclogg]] < dtcrit))[0]
rgbbd = np.where(dt[rgb[rgblogg]] > dtcrit)[0]
nbd=len(rcbd)+len(rgbbd)
if nbd < nbest :
dtbest=dtcrit
nbest=nbd
dtcrit=dtbest
rcbd = np.where((dt[rc[rclogg]] < dtcrit))[0]
rgbbd = np.where(dt[rgb[rgblogg]] > dtcrit)[0]
print('dtcrit: ',dtcrit)
print('bad fractions (rc, rgb): ',float(len(rcbd))/len(rclogg),float(len(rgbbd))/len(rgblogg))
dt=allstar['FPARAM'][:,0]-(rgbfit[0]+rgbfit[1]*(allstar['FPARAM'][:,1]-2.5)+rgbfit[2]*allstar['FPARAM'][:,3])
cn=allstar['FPARAM'][:,4]-allstar['FPARAM'][:,5]
plots.plotc(ax[0,1],dt[rc],allstar['FPARAM'][rc,1],allstar['FPARAM'][rc,3],marker='s',xr=[-500,500],yr=[4,1],size=20,zr=[-2.0,0.5],xt='dt',yt='log g',zt='[M/H]',colorbar=True)
plots.plotc(ax[0,1],dt[rgb],allstar['FPARAM'][rgb,1],allstar['FPARAM'][rgb,3],marker='o',xr=[-500,500],yr=[4,1],size=20,zr=[-2.0,0.5])
plots.plotl(ax[0,1],[-500,500],[rclim[0],rclim[0]],color='k')
plots.plotl(ax[0,1],[-500,500],[rclim[1],rclim[1]],color='k')
ax[1,1].hist(dt[rc],color='b',bins=np.arange(-500,500,10))
ax[1,1].hist(dt[rgb],color='r',bins=np.arange(-500,500,10))
ax[1,1].hist(dt[rc2],color='g',bins=np.arange(-500,500,10))
ax[1,1].hist(dt[rc2cl],color='m',bins=np.arange(-500,500,10))
ax[1,1].set_xlabel('dt')
# plot dt vs C/N
#plots.plotc(ax[0,2],dt[rc[rclogg]],cn[rc[rclogg]],allstar['FPARAM'][rc[rclogg],1],marker='s',xr=[-500,500],yr=[-1.0,0.5],zr=[2,4],size=20,xt='dt',yt='[C/N]',zt='log g',colorbar=True)
#plots.plotc(ax[1,2],dt[rgb[rgblogg]],cn[rgb[rgblogg]],allstar['FPARAM'][rgb[rgblogg],1],marker='o',xr=[-500,500],yr=[-1.0,0.5],zr=[2,4],size=20,xt='dt',yt='[C/N]',zt='log g',colorbar=True)
plots.plotc(ax[0,2],dt[rc[rclogg]],cn[rc[rclogg]],allstar['FPARAM'][rc[rclogg],3],marker='s',xr=[-500,500],yr=[-1.0,0.5],zr=[-1.5,0.5],size=20,xt='dt',yt='[C/N]',zt='[M/H]',colorbar=True)
plots.plotc(ax[1,2],dt[rgb[rgblogg]],cn[rgb[rgblogg]],allstar['FPARAM'][rgb[rgblogg],3],marker='o',xr=[-500,500],yr=[-1.0,0.5],zr=[-1.5,0.5],size=20,xt='dt',yt='[C/N]',zt='[M/H]',colorbar=True)
cnslopebest=-0.2/100.
cnintbest=0.
nbest=10000
slopearray=np.arange(cnslopebest-20*0.0001,cnslopebest+20*0.0001,0.0001)
intarray=np.arange(cnintbest-10*0.02,cnintbest+10*0.02,0.02)
offarray=np.arange(-0.9,-0.3,0.01)
x=np.array([-200,400])
for cnslope in slopearray :
for cnoff in offarray :
for cnint in intarray :
cnfit=np.array([cnint,cnoff,cnslope])
rgbbd=np.where(cn[rgb[rgblogg]] > cnfit[0]+cnfit[1]*allstar['FPARAM'][rgb[rgblogg],3]+cnfit[2]*dt[rgb[rgblogg]])[0]
rcbd= np.where(cn[rc[rclogg]] < cnfit[0]+cnfit[1]*allstar['FPARAM'][rc[rclogg],3]+cnfit[2]*dt[rc[rclogg]])[0]
nbd=float(len(rcbd))/len(rclogg)+float(len(rgbbd))/len(rgblogg)
if nbd < nbest :
cnfitbest=cnfit
nbest=nbd
print(nbest)
cnfit=cnfitbest
x=np.array([-200,400])
for i in [0,1] :
for j in [2] :
ax[i,j].plot(x,cnfit[0]+cnfit[1]*0.+x*cnfit[2])
ax[i,j].plot(x,cnfit[0]+cnfit[1]*(-0.5)+x*cnfit[2])
ax[i,j].plot(x,cnfit[0]+cnfit[1]*0.5+x*cnfit[2])
rcbd = np.where((cn[rc[rclogg]] < cnfit[0]+cnfit[1]*allstar['FPARAM'][rc[rclogg],3]+cnfit[2]*dt[rc[rclogg]]))[0]
rgbbd = np.where((cn[rgb[rgblogg]] > cnfit[0]+cnfit[1]*allstar['FPARAM'][rgb[rgblogg],3]+cnfit[2]*dt[rgb[rgblogg]]))[0]
ax[0,2].text(0.98,0.98,'RC bad: {:5.3f}'.format(float(len(rcbd))/len(rclogg)),transform=ax[0,2].transAxes,va='top',ha='right')
ax[1,2].text(0.98,0.98,'RGB bad: {:5.3f}'.format(float(len(rgbbd))/len(rgblogg)),transform=ax[1,2].transAxes,va='top',ha='right')
print('bad fractions (rc, rgb): ',float(len(rcbd))/len(rclogg),float(len(rgbbd))/len(rgblogg),len(rcbd),len(rclogg),len(rgbbd),len(rgblogg))
plt.tight_layout()
if out is not None :
plt.savefig(out+'.png')
plt.close(fig)
fig,ax=plots.multi(2,1)
plots.plotp(ax[0],allstar['FPARAM'][rgb,0],allstar['FPARAM'][rgb,1],color='r',xr=[5500,3500],yr=[4,1],xt='Teff',yt='log g')
plots.plotp(ax[0],allstar['FPARAM'][rc,0],allstar['FPARAM'][rc,1],color='b')
plots.plotp(ax[0],allstar['FPARAM'][rc2,0],allstar['FPARAM'][rc2,1],color='g')
plots.plotp(ax[0],allstar['FPARAM'][rc2cl,0],allstar['FPARAM'][rc2cl,1],color='m')
x = -0.08 - 0.5*allstar['FPARAM'][:,3] - 0.0039*dt
plots.plotp(ax[1],x[rgb],cn[rgb],color='r',xt='-0.08-0.5[M/H]-0.0039 dt',yt='[C/N]',xr=[-2.5,1.5],yr=[-2,1],nxtick=5)
plots.plotp(ax[1],x[rc],cn[rc],color='b')
plots.plotp(ax[1],x[rc2],cn[rc2],color='g')
plots.plotp(ax[1],x[rc2cl],cn[rc2cl],color='m')
ax[1].plot([-2,1.5],[-2,1.5])
fig.tight_layout()
if out is not None :
plt.savefig(out+'_hr.pdf')
plt.close(fig)
return {'rclim' : rclim, 'rgbsep' : rgbfit, 'cnsep' : cnfit}
def rcrgb_plot(a,out=None) :
""" Plot logg classification from bitmask
"""
b=bitmask.ParamBitMask()
rgb=np.where((a['PARAMFLAG'][:,1] & b.getval('LOGG_CAL_RGB')) > 0)[0]
rc=np.where((a['PARAMFLAG'][:,1] & b.getval('LOGG_CAL_RC')) > 0)[0]
ms=np.where((a['PARAMFLAG'][:,1] & b.getval('LOGG_CAL_MS')) > 0)[0]
rgb_ms=np.where((a['PARAMFLAG'][:,1] & b.getval('LOGG_CAL_RGB_MS')) > 0)[0]
fig,ax = plots.multi(1,1)
plots.plotp(ax,a['FPARAM'][rgb,0],a['FPARAM'][rgb,1],color='r',size=1,
xr=[8000,3000],yr=[6,-1],xt='$T_{eff}$',yt='log g')
plots.plotp(ax,a['FPARAM'][rc,0],a['FPARAM'][rc,1],color='b',size=1)
plots.plotp(ax,a['FPARAM'][ms,0],a['FPARAM'][ms,1],color='g',size=1)
plots.plotp(ax,a['FPARAM'][rgb_ms,0],a['FPARAM'][rgb_ms,1],color='m',size=1)
if out is not None :
fig.savefig(out+'.png')
plt.close()
def dwarf(allstar,mhrange=[-2.5,1.0],loggrange=[3.8,5.5],teffrange=[3000,7500],apokasc_cat='APOKASC_cat_v4.4.2.fits',out='logg',calib=False) :
""" logg calibration for dwarfs, from asteroseismic and isochrones
"""
if calib :
param = 'PARAM'
else :
param = 'FPARAM'
gd=apselect.select(allstar,badval=['STAR_BAD'],mh=mhrange,logg=loggrange,teff=teffrange,raw=True)
allstar=allstar[gd]
try:
gd=np.where(allstar['VISIT'] == 0)[0]
allstar=allstar[gd]
except: pass
# match ASPCAP with APOKASC, and get RC/RGB stars
apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/'+apokasc_cat)[1].data
# strip off .XXXX if we have it, e.g. from calibration fields where we have added .FIELD
apogee_id = np.array(np.core.defchararray.split(allstar['APOGEE_ID'],'.').tolist())[:,0]
i1,i2=match.match(apogee_id,apokasc['2MASS_ID'])
# now get isochrone logg from lower main sequence
isologg=isochrone(allstar,snrbd=50)
isochrone_id = np.array(np.core.defchararray.split(isologg['APOGEE_ID'],'.').tolist())[:,0]
j1,j2=match.match(apogee_id,isochrone_id)
# plots of gravity differences
fig,ax=plots.multi(2,2)
plots.plotc(ax[0,0],allstar[param][i1,1],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,0],yr=[-1,1],
xt='log g',yt=r'$\Delta$logg',zt='Teff',colorbar=True,xr=[3,6],zr=[4000,7000])
plots.plotc(ax[0,1],allstar[param][i1,3],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,0],yr=[-1,1],
xt='[M/H]',yt=r'$\Delta$logg',zt='Teff',colorbar=True,xr=[-2,0.5],zr=[4000,7000])
plots.plotc(ax[1,0],allstar[param][i1,0],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],10.**allstar[param][i1,2],yr=[-1,1],
xt='Teff',yt=r'$\Delta$logg',zt='vmicro',colorbar=True,xr=[3000,8000],zr=[0.5,2.5])
plots.plotc(ax[1,1],allstar[param][i1,0],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,3],yr=[-1,1],
xt='Teff',yt=r'$\Delta$logg',zt='[M/H]',colorbar=True,xr=[3000,8000],zr=[-2,0.5])
# only add main sequence in Teff plot
plots.plotc(ax[1,1],allstar[param][j1,0],allstar[param][j1,1]-isologg['ISOLOGG'][j2],allstar[param][j1,3],zr=[-2,0.5])
plt.tight_layout()
# 2D fit as f(Teff,[M/H]), using both APOKASC and isochrone log g
gd=np.where(apokasc['LOGG_DW'][i2] > -99)[0]
tfit=allstar[param][i1[gd],0]
mhfit=allstar[param][i1[gd],3]
diff=allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]]
snrfit=allstar['SNR'][i1[gd]]
# do fit from high S/N, but get uncertainties from all
gd=np.where(allstar['SNR'][j1] > 300)[0]
msfit = fit.fit2d(np.append(tfit,allstar[param][j1[gd],0]),
np.append(mhfit,allstar[param][j1[gd],3]),
np.append(diff,allstar[param][j1[gd],1]-isologg['ISOLOGG'][j2[gd]]),degree=1,reject=0.3)
# for uncertainties, all all S/N
tfit=np.append(tfit,allstar[param][j1,0])
mhfit=np.append(mhfit,allstar[param][j1,3])
diff=np.append(diff,allstar[param][j1,1]-isologg['ISOLOGG'][j2])
snrfit=np.append(snrfit,allstar['SNR'][j1])
mserrpar = err.errfit(tfit,np.clip(snrfit,0.,249.),mhfit,diff-msfit(tfit,mhfit),
out=out+'_ms',title='log g',zr=[0,0.2])
#mserrpar=np.zeros([4])
# plot the relation
tfit=np.arange(teffrange[0],teffrange[1],10)
mhfit=tfit*0.
plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='orange',linewidth=1.5)
mhfit=tfit*0-1.
plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='c',linewidth=1.5)
mhfit=tfit*0+0.5
plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='r',linewidth=1.5)
ax[1,1].grid()
if out is not None:
fig.savefig(out+'_dwarfs.png')
plt.close()
# HR diagram plot color coded by asteroseismic gravity differences
hrfig,hrax=plots.multi(1,2,hspace=0.001)
gd=np.where(apokasc['APOKASC2_LOGG'][i2] > -99)[0]
plots.plotc(hrax[0],allstar[param][i1[gd],0],allstar[param][i1[gd],1],allstar[param][i1[gd],1]-apokasc['APOKASC2_LOGG'][i2[gd]],
xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5],colorbar=True,zt=r'$\Delta$ logg',xt='Teff',yt='logg')
plots.plotc(hrax[0],allstar[param][j1,0],allstar[param][j1,1],allstar[param][j1,1]-isologg['ISOLOGG'][j2],zr=[-0.5,0.5])
plots.plotc(hrax[1],allstar[param][i1[gd],0],apokasc['APOKASC2_LOGG'][i2[gd]],allstar[param][i1[gd],1]-apokasc['APOKASC2_LOGG'][i2[gd]],
xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5],colorbar=True,zt=r'$\Delta$ logg',xt='Teff',yt='APOKASC logg')
# use asteroseismic logg on y axis
gd=np.where(apokasc['LOGG_DW'][i2] > -99)[0]
plots.plotc(hrax[0],allstar[param][i1[gd],0],allstar[param][i1[gd],1],allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]],
xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5])
plots.plotc(hrax[1],allstar[param][i1[gd],0],apokasc['LOGG_DW'][i2[gd]],allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]],
xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5])
if out is not None:
hrfig.savefig(out+'_all.png')
plt.close()
return {'calloggmin' : loggrange[0], 'calloggmax' : loggrange[1], 'loggmin' : loggrange[0], 'loggmax' : loggrange[1],
'mhmin' : mhrange[0], 'mhmax' : mhrange[1], 'temin': teffrange[0], 'temax' : teffrange[1],
'msfit' : msfit.parameters, 'errpar' : mserrpar }
def apokasc(allstar,apokasc_cat='APOKASC_cat_v4.4.2.fits',raw=True,plotcal=False,out='loggcomp',calloggrange=[-1.,3.8],loggrange=[-1.,3.8],mhrange=[-2.5,0.5],teffrange=[3500,5500],calteffrange=[3000,6000],calib=False) :
'''
asteroseismic log g comparisons for input allStar structure
'''
if calib :
param = 'PARAM'
else :
param = 'FPARAM'
gd=apselect.select(allstar,badval=['STAR_BAD'],mh=mhrange,logg=loggrange,teff=teffrange,raw=True)
allstar=allstar[gd]
# match ASPCAP with APOKASC, and get RC/RGB stars
apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/'+apokasc_cat)[1].data
# strip off .XXXX if we have it, e.g. from calibration fields where we have added .FIELD
apogee_id = np.array(np.core.defchararray.split(allstar['APOGEE_ID'],'.').tolist())[:,0]
i1,i2=match.match(apogee_id,apokasc['2MASS_ID'])
try:
print('trying APOKASC2 catalog tags...')
logg='APOKASC2_LOGG'
rgb=np.where((apokasc['CONS_EVSTATES'][i2] == 'RGB') & (apokasc[logg][i2] > -1))[0]
rc=np.where((apokasc['CONS_EVSTATES'][i2] == 'RC') & (apokasc[logg][i2] > -1))[0]
notrc=np.where(apokasc['CONS_EVSTATES'][i2] != 'RC')[0]
rc2=np.where((apokasc['CONS_EVSTATES'][i2] == '2CL') & (apokasc[logg][i2] > -1))[0]
rc2cl=np.where((apokasc['CONS_EVSTATES'][i2] == 'RC/2CL') & (apokasc[logg][i2] > -1))[0]
# use LOGG_DW if we have it
dw=np.where((apokasc[logg][i2] < -99) & (apokasc['LOGG_DW'][i2] >-99) )[0]
apokasc[logg][i2[dw]] = apokasc['LOGG_DW'][i2[dw]]
rgb=np.append(rgb,dw)
except :
# DR14 used APOKASC_cat_v3.6.0
print('trying older APOKASC catalog tags...')
logg='LOGG_SYD_SCALING'
rgb=np.where((apokasc['CONS_EVSTATES'][i2] == 'RGB') & (apokasc[logg][i2] > -1))[0]
rc=np.where((apokasc['CONS_EVSTATES'][i2] == 'RC') & (apokasc[logg][i2] > -1))[0]
notrc=np.where(apokasc['CONS_EVSTATES'][i2] != 'RC')[0]
rc2=np.where((apokasc['CONS_EVSTATES'][i2] == '2CL') & (apokasc[logg][i2] > -1))[0]
rc2cl=np.where((apokasc['CONS_EVSTATES'][i2] == 'RC/2CL') & (apokasc[logg][i2] > -1))[0]
rcall=np.append(rc,rc2)
rcall=np.append(rcall,rc2cl)
# Do some 2D fits for RGB stars
fig,ax=plots.multi(2,1,figsize=(12,6))
# linear in logg and [M/H]
rgbfit = fit.fit2d(allstar['FPARAM'][i1[rgb],1],allstar['FPARAM'][i1[rgb],3],
allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],zr=[-1,0.5],gdrange=[-2,2],yr=[-3,1],xr=[1,4],degree=1,
plot=ax[0],yt='[M/H]',xt='log g',zt='$\Delta log g$',reject=0.3)
# cubic in logg, linear in [M/H]
data=allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]]
design=np.ones([5,len(rgb)])
design[1,:]=allstar['FPARAM'][i1[rgb],1]
design[2,:]=allstar['FPARAM'][i1[rgb],1]**2
design[3,:]=allstar['FPARAM'][i1[rgb],1]**3
design[4,:]=allstar['FPARAM'][i1[rgb],3]
params=fit.linear(data,design)[0]
rgbrms=(allstar[param][i1[rgb],1]-rgbfit(allstar['FPARAM'][i1[rgb],1],allstar['FPARAM'][i1[rgb],3])-apokasc[logg][i2[rgb]]).std()
ax[0].text(0.98,0.98,'rms: {:5.3f}'.format(rgbrms),transform=ax[0].transAxes,va='top',ha='right')
rgberrpar = err.errfit(allstar[param][i1[rgb],0],allstar['SNR'][i1[rgb]],allstar[param][i1[rgb],3],
allstar[param][i1[rgb],1]-rgbfit(allstar['FPARAM'][i1[rgb],1],allstar['FPARAM'][i1[rgb],3])-apokasc[logg][i2[rgb]],
out=out+'_rgb',title='log g',zr=[0,0.2])
loggmin=allstar['FPARAM'][i1[rgb],1].min()
loggmax=allstar['FPARAM'][i1[rgb],1].max()
# RC fits
# linear in logg and [M/H]
rcfit = fit.fit2d(allstar['FPARAM'][i1[rc],1],allstar['FPARAM'][i1[rc],3],
allstar[param][i1[rc],1]-apokasc[logg][i2[rc]],zr=[-1,0.5],gdrange=[-2,2],yr=[-3,1],xr=[1,4],degree=1,
plot=ax[1],yt='[M/H]',xt='log g',zt='$\Delta log g$',reject=0.3)
# quadratic in logg
rcfit2 = fit.fit1d(allstar['FPARAM'][i1[rcall],1], allstar[param][i1[rcall],1]-apokasc[logg][i2[rcall]],zr=[-1,0.5],yr=[-3,1],xr=[1,4],degree=2,reject=0.3)
rcrms=(allstar[param][i1[rc],1]-rcfit(allstar['FPARAM'][i1[rc],1],allstar['FPARAM'][i1[rc],3])-apokasc[logg][i2[rc]]).std()
rcerrpar = err.errfit(allstar[param][i1[rc],0],allstar['SNR'][i1[rc]],allstar[param][i1[rc],3],
allstar[param][i1[rc],1]-rcfit(allstar['FPARAM'][i1[rc],1],allstar['FPARAM'][i1[rc],3])-apokasc[logg][i2[rc]],
out=out+'_rc',title='log g',zr=[0,0.2])
ax[1].text(0.98,0.98,'rms: {:5.3f}'.format(rcrms),transform=ax[1].transAxes,va='top',ha='right')
fig.tight_layout()
if out is not None :
fig.savefig(out+'.png')
plt.close()
# set up plots
if raw and plotcal :
fig,ax=plots.multi(2,3,hspace=0.5,wspace=0.001,figsize=(12,12))
else :
fig,tmpax=plots.multi(1,4,hspace=0.5,wspace=0.001,figsize=(8,10))
fig2,ax2=plots.multi(1,1)
# diff color-coded by gravity as f([M/H])
# diff color-coded by [M/H] as f(log g)
# RGB and RC as f(log g)
if raw :
if plotcal: tmpax=ax[:,0]
plots.plotc(tmpax[0],allstar['FPARAM'][i1,3],allstar[param][i1,1]-apokasc[logg][i2],
allstar['FPARAM'][i1,1],zr=[0,5],xr=[-2.5,0.5],yr=[-0.75,0.75],xt='[M/H]',yt='ASPCAP-seismic log g',zt='log g',size=15,colorbar=True)
plots.plotc(tmpax[1],allstar['FPARAM'][i1[rgb],1],allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],
allstar['FPARAM'][i1[rgb],3],xr=[0,5],zr=[-2.5,0.5],yr=[-0.75,0.75],zt='[M/H]',yt='ASPCAP-seismic log g',xt='log g',size=15,colorbar=True)
loggfit=np.arange(1,3.5,0.01)
mhfit=loggfit*0.
plots.plotl(tmpax[1],loggfit,rgbfit(loggfit,mhfit),color='orange',linewidth=1.5)
plots.plotl(tmpax[1],loggfit,params[0]+params[1]*loggfit+params[2]*loggfit**2+params[3]*loggfit**3+params[4]*mhfit,color='orange',linewidth=1.5)
mhfit=loggfit*0-2.
plots.plotl(tmpax[1],loggfit,rgbfit(loggfit,mhfit),color='b',linewidth=1.5)
plots.plotl(tmpax[1],loggfit,params[0]+params[1]*loggfit+params[2]*loggfit**2+params[3]*loggfit**3+params[4]*mhfit,color='b',linewidth=1.5)
mhfit=loggfit*0-1.
plots.plotl(tmpax[1],loggfit,rgbfit(loggfit,mhfit),color='c',linewidth=1.5)
plots.plotl(tmpax[1],loggfit,params[0]+params[1]*loggfit+params[2]*loggfit**2+params[3]*loggfit**3+params[4]*mhfit,color='c',linewidth=1.5)
mhfit=loggfit*0+0.5
plots.plotl(tmpax[1],loggfit,rgbfit(loggfit,mhfit),color='r',linewidth=1.5)
plots.plotl(tmpax[1],loggfit,params[0]+params[1]*loggfit+params[2]*loggfit**2+params[3]*loggfit**3+params[4]*mhfit,color='r',linewidth=1.5)
tmpax[0].grid()
tmpax[1].grid()
iax=tmpax[2]
plots.plotp(iax,allstar['FPARAM'][i1[rgb],1],allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],
xr=[0,5],yr=[-0.5,0.5],xt='seismic log g',yt='ASPCAP-seismic log g',label=[0.9,0.8,'RGB'],color='r',size=5)
plots.plotp(iax,allstar['FPARAM'][i1[rc],1],allstar[param][i1[rc],1]-apokasc[logg][i2[rc]],
xr=[0,5],yr=[-0.5,0.5],xt='seismic log g',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='b',size=5)
plots.plotp(iax,allstar['FPARAM'][i1[rc2],1],allstar[param][i1[rc2],1]-apokasc[logg][i2[rc2]],
xr=[0,5],yr=[-0.5,0.5],xt='seismic log g',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='g',size=5)
plots.plotp(iax,allstar['FPARAM'][i1[rc2cl],1],allstar[param][i1[rc2cl],1]-apokasc[logg][i2[rc2cl]],
xr=[0,5],yr=[-0.5,0.5],xt='seismic log g',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='m',size=5)
# single plot as f(Teff)
iax=ax2
plots.plotp(iax,allstar['FPARAM'][i1[rgb],0],allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],
xr=[4500,5200],yr=[-0.5,0.5],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.8,'RGB'],color='r',size=5)
plots.plotp(iax,allstar['FPARAM'][i1[rc],0],allstar[param][i1[rc],1]-apokasc[logg][i2[rc]],
xr=[4500,5200],yr=[-0.5,0.5],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='b',size=5)
plots.plotp(iax,allstar['FPARAM'][i1[rc2],0],allstar[param][i1[rc2],1]-apokasc[logg][i2[rc2]],
xr=[4500,5200],yr=[-0.5,0.5],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='g',size=15)
plots.plotp(iax,allstar['FPARAM'][i1[rc2cl],0],allstar[param][i1[rc2cl],1]-apokasc[logg][i2[rc2cl]],
xr=[4500,5200],yr=[-0.5,0.5],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='m',size=5)
loggfit=np.arange(2.5,3.5,0.01)
mhfit=loggfit*0.
plots.plotl(tmpax[2],loggfit,rcfit(loggfit,mhfit),color='g',linewidth=2)
plots.plotl(tmpax[2],loggfit,rcfit2(loggfit),color='k',linewidth=2)
tmpax[2].grid()
#plots.plotp(tmpax[3],allstar['FPARAM'][i1[rgb],0],allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],
# xr=[3500,7000],yr=[-0.75,0.75],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.8,'RGB'],color='r',size=15)
plots.plotc(tmpax[3],allstar['FPARAM'][i1[rgb],0],allstar[param][i1[rgb],1]-apokasc[logg][i2[rgb]],allstar[param][i1[rgb],3],
xr=[3500,7000],yr=[-0.75,0.75],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.8,'RGB'],zr=[-2,0.5],size=15,colorbar=True)
plots.plotp(tmpax[3],allstar['FPARAM'][i1[rc],0],allstar[param][i1[rc],1]-apokasc[logg][i2[rc]],
xr=[3500,7000],yr=[-0.75,0.75],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='b',size=15)
plots.plotp(tmpax[3],allstar['FPARAM'][i1[rc2],0],allstar[param][i1[rc2],1]-apokasc[logg][i2[rc2]],
xr=[3500,7000],yr=[-0.75,0.75],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='g',size=15)
plots.plotp(tmpax[3],allstar['FPARAM'][i1[rc2cl],0],allstar[param][i1[rc2cl],1]-apokasc[logg][i2[rc2cl]],
xr=[3500,7000],yr=[-0.75,0.75],xt='Teff',yt='ASPCAP-seismic log g',label=[0.9,0.6,'RC'],color='m',size=15)
tmpax[3].grid()
#plots.plotc(tmpax[3],allstar['FPARAM'][i1[rgb],1],allstar['PARAM'][i1[rgb],1]-allstar['FPARAM'][i1[rgb],1],
# allstar['FPARAM'][i1[rgb],3],xr=[0,5],yr=[-1,1],xt='seismic log g',yt='corrected-raw log g',label=[0.1,0.9,'allstar (Kurucz)'],zr=[-2,0.5])
if plotcal :
if raw: tmpax=ax[:,1]
param=allstar['FPARAM'][:,1]-rgbfit(allstar['FPARAM'][:,1],allstar['FPARAM'][:,3])
param[i1[rc]]=allstar['FPARAM'][i1[rc],1]-rcfit(allstar['FPARAM'][i1[rc],1],allstar['FPARAM'][i1[rc],3])
plots.plotc(tmpax[0],allstar['FPARAM'][i1,3],param[i1]-apokasc[logg][i2],
allstar['FPARAM'][i1,1],zr=[0,5],xr=[-2.5,0.5],yr=[-0.75,0.75],xt='[M/H]',colorbar=True,zt='log g',size=15)
plots.plotc(tmpax[1],allstar['FPARAM'][i1,1],param[i1]-apokasc[logg][i2],
allstar['FPARAM'][i1,3],xr=[1,4],zr=[-2.5,0.5],yr=[-0.75,0.75],zt='[M/H]',colorbar=True,xt='log g',size=15)
plots.plotp(tmpax[2],allstar['FPARAM'][i1[rgb],1],param[i1[rgb]]-apokasc[logg][i2[rgb]],
xr=[1,4],yr=[-0.75,0.75],xt='log g',color='r',size=15)
plots.plotp(tmpax[2],allstar['FPARAM'][i1[rc],1],param[i1[rc]]-apokasc[logg][i2[rc]],
xr=[1,4],yr=[-0.75,0.75],xt='log g',color='b',size=15)
plots.plotp(tmpax[2],allstar['FPARAM'][i1[rc2],1],param[i1[rc2]]-apokasc[logg][i2[rc2]],
xr=[1,4],yr=[-0.75,0.75],xt='log g',color='g',size=15)
plots.plotp(tmpax[2],allstar['FPARAM'][i1[rc2cl],1],param[i1[rc2cl]]-apokasc[logg][i2[rc2cl]],
xr=[1,4],yr=[-0.75,0.75],xt='log g',color='m',size=15)
#plots.plotc(tmpax[3],allstar['FPARAM'][i1[rc],1],allstar['PARAM'][i1[rc],1]-allstar['FPARAM'][i1[rc],1],
# allstar['FPARAM'][i1[rc],3],xr=[0,5],yr=[-1,1],xt='seismic log g',zr=[-2,0.5])
fig.tight_layout()
if out is not None :
fig.savefig(out+'_b.png')
plt.close(fig)
fig2.savefig(out+'_c.png')
plt.close(fig2)
return {'calloggmin' : calloggrange[0], 'calloggmax' : calloggrange[1], 'loggmin' : loggmin, 'loggmax' : loggmax,
'mhmin' : mhrange[0], 'mhmax' : mhrange[1], 'calteffmin': calteffrange[0], 'calteffmax' : calteffrange[1],
'rgbfit' : rgbfit.parameters, 'rgbfit2' : params, 'rcfit' : rcfit.parameters, 'rcfit2' : rcfit2.parameters, 'rgbrms' : rgbrms, 'rcrms' : rcrms ,
'rgberrpar': rgberrpar, 'rcerrpar': rcerrpar}
def isochrone(allstar,snrbd=300) :
""" logg correction for cool dwarfs based on isochrones
returns structured array with APOGEE_ID, ISOLOGG
"""
print('getting isochrone log g')
# restrict the sample to good high S/N stars
aspcapmask=bitmask.AspcapBitMask()
starmask=bitmask.StarBitMask()
gd=np.where( ((allstar['ASPCAPFLAG']&aspcapmask.badval()) == 0) &
((allstar['STARFLAG']&starmask.badval()) == 0) &
(allstar['SNR']>=snrbd) ) [0]
allstar=allstar[gd]
if 'TARGFLAGS' in allstar.columns.names : badtarg=['YOUNG','EMBEDDED','EXTENDED','M31','M33','EMISSION','RRLYR','DSPH','MAGCLOUD']
else : badtarg = None
gd=apselect.select(allstar,raw=True,teff=[3000,5000],logg=[4.0,5.5],badtarg=badtarg)
allstar=allstar[gd]
print(len(allstar))
# loop through isochrones, reading, finding matches, and calculating expected isochrone logg given Teff
first=True
for z in np.arange(-1.0,0.3,0.1) :
if z<-0.01 : name='zm{:02d}'.format(int(abs(z)*10))
else :name='zp{:02d}'.format(int(abs(z)*10))
j=np.where(abs(allstar['FPARAM'][:,3]-z) <0.05)[0]
if len(j) > 0:
print(z,len(j),name)
isodata=isochrones.read(os.environ['ISOCHRONE_DIR']+'/'+name+'.dat',agerange=[9.29,9.31])
mdiff = isodata['mini'][0:-1]-isodata['mini'][1:]
use=np.where(abs(mdiff) < 1.e-8)[0]
if len(use) > 0 : use=use[0]
else : use=len(isodata)
if use < 10 : pdb.set_trace()
gd=np.where(isodata['logg'][0:use]>4)[0]
f = interpolate.interp1d(isodata['teff'][gd], isodata['logg'][gd],bounds_error=False)
isologg = f(allstar['FPARAM'][j,0])
if first :
out_id=allstar['APOGEE_ID'][j]
out_isologg=isologg
first= False
else :
out_id=np.append(out_id,allstar['APOGEE_ID'][j])
out_isologg=np.append(out_isologg,isologg)
# output structured array
outtype=np.dtype([('APOGEE_ID',out_id.dtype),('ISOLOGG',isologg.dtype)])
outdata=np.empty(len(out_id),dtype=outtype)
outdata['APOGEE_ID']=out_id
outdata['ISOLOGG']=out_isologg
return outdata
def clusters(allstar,xr=[-2.75,0.5],yr=[-1.,1.],zr=[3500,5500],apokasc='APOKASC_cat_v3.6.0.fits',firstgen=False) :
'''
Compare ASPCAP gravities in clusters to physical gravities
'''
fig,ax=plots.multi(1,2,hspace=0.001)
# put APOKASC underneath
apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/'+apokasc)[1].data
# strip off .XXXX if we have it, e.g. from calibration fields where we have added .FIELD
apogee_id = np.array(np.core.defchararray.split(allstar['APOGEE_ID'],'.').tolist())[:,0]
i1,i2=match.match(apogee_id,apokasc['2MASS_ID'])
plots.plotc(ax[0],allstar['FPARAM'][i1,3],allstar['FPARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],allstar['FPARAM'][i1,0],zr=zr)
plots.plotc(ax[1],allstar['PARAM'][i1,3],allstar['PARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],allstar['PARAM'][i1,0],zr=zr)
# physical gravities
clust=apselect.clustdata()
itext=0
out=open('clust.txt','w')
for cluster in ['M92','M15','M53','M2','M13','M3','M5','N2420','M67','N6819','N6791'] :
i=np.where(clust.name == cluster)
dist=clust[i].dist*1000.
mh=clust[i].mh
mass=clust[i].giant_mass
ejk=0.452*clust[i].ebv
ah=1.55*clust[i].ebv
age=np.log10(clust[i].age*1.e9)
name=clust[i].name
ytext=0.85-itext%3*0.15
if mass > 0 :
# get cluster members
j=np.array(apselect.clustmember(allstar,cluster,raw=True,firstgen=firstgen))
# calculate physical gravities
lum=10.**(-0.4*(allstar['H'][j]-ah+isochrones.bc(allstar['FPARAM'][j,0],filt='h',agerange=[age-0.05,age+0.05])-(5*np.log10(dist)-5)-4.74))*astropy.constants.L_sun.cgs.value
logg=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*allstar['FPARAM'][j,0]**4/lum)
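# i.e. L = 4*pi*R^2*sigma*Teff^4 combined with g = G*M/R^2 gives g = 4*pi*G*M*sigma*Teff^4 / L,
# with L taken from the dereddened H magnitude, bolometric correction and distance modulus
# (M_bol,sun = 4.74).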
plots.plotc(ax[0],allstar['FPARAM'][j,3]*0+mh,allstar['FPARAM'][j,1]-logg,allstar['FPARAM'][j,0],xr=xr,yr=yr,zr=zr,yt='ASPCAP-physical log g')
ax[0].text(0.9,0.1,'raw',transform=ax[0].transAxes,ha='right')
plots.plotp(ax[0],allstar['FPARAM'][j,3]*0+mh,allstar['FPARAM'][j,1]-logg,color='k')
plots.plotp(ax[0],mh[0],np.median(allstar['FPARAM'][j,1]-logg),size=40,color='r')
ax[0].text(mh[0],ytext,name[0],ha='center')
out.write('{:<20s}{:8.3f}{:8.3f}{:8.3f}\n'.format(clust[i].name[0],clust[i].dist[0],clust[i].ebv[0],mass[0]))
gd=np.where((allstar['PARAM'][j,3]>-9)&(allstar['PARAM'][j,1]>-9))[0]
axim=plots.plotc(ax[1],allstar['PARAM'][j[gd],3]*0+mh,allstar['PARAM'][j[gd],1]-logg[gd],allstar['PARAM'][j[gd],0],xr=xr,yr=yr,zr=zr,xt='[M/H]',yt='ASPCAP-physical log g')
ax[1].text(0.9,0.1,'calibrated',transform=ax[1].transAxes,ha='right')
plots.plotp(ax[1],mh[0],np.median(allstar['PARAM'][j[gd],1]-logg[gd]),size=40)
# apply a temperature correction for the physical gravities
logg_new=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*(allstar['FPARAM'][j,0]-100.*allstar['FPARAM'][j,3])**4/lum)
plots.plotp(ax[1],mh[0],np.median(allstar['PARAM'][j,1]-logg_new),size=40,color='b')
# use a photometric temperature
logg_phot=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*stars.ghb(allstar['J'][j]-allstar['K'][j]-ejk,allstar['FPARAM'][j,3])[0]**4/lum)
plots.plotp(ax[1],mh[0],np.median(allstar['PARAM'][j,1]-logg_phot),size=40,color='g')
ax[1].text(mh[0],ytext,name[0],ha='center')
itext+=1
# Now adding the colorbar
cbaxes = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = plt.colorbar(axim, cax = cbaxes)
out.close()
def dr13dr12() :
'''
ASPCAP compared with physical and asteroseismic log g, DR13/DR12/l30i
'''
apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/APOKASC_cat_v3.6.0.fits')[1].data
j=np.where(apokasc['LOGG_SYD_SCALING'] > -1)[0]
apokasc=apokasc[j]
dr12load=apload.ApLoad(dr='dr12')
dr12=dr12load.allStar()[1].data
dr13load=apload.ApLoad(dr='dr13')
dr13=dr13load.allStar()[1].data
dr13load.aspcap='l30i'
dr13load.results='l30i'
l30i=dr13load.allStar()[1].data
fig,ax =plots.multi(3,2,wspace=0.001,hspace=0.001)
# physical gravities
clust=apselect.clustdata()
for cluster in ['M92','M15','M53','M2','M13','M3','M5'] :
i=np.where(clust.name == cluster)
dist=clust[i].dist*1000.
mh=clust[i].mh
mass=0.85
#DR12
j=apselect.clustmember(dr12,cluster,raw=True)
lum=10.**(-0.4*(dr12['H'][j]+isochrones.bc(dr12['FPARAM'][j,0],filt='h',agerange=[10,10.1])-(5*np.log10(dist)-5)-4.74))*astropy.constants.L_sun.cgs.value
logg=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*dr12['FPARAM'][j,0]**4/lum)
plots.plotc(ax[0,0],dr12['FPARAM'][j,3]*0+mh,dr12['FPARAM'][j,1]-logg,dr12['FPARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],yt='ASPCAP-physical log g',label=[0.1,0.9,'DR2 raw'])
plots.plotc(ax[1,0],dr12['PARAM'][j,3]*0+mh,dr12['PARAM'][j,1]-logg,dr12['PARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],xt='[M/H]',yt='ASPCAP-physical log g',label=[0.1,0.9,'DR12 cal'])
#DR13
j=apselect.clustmember(dr13,cluster,raw=True)
lum=10.**(-0.4*(dr13['H'][j]+isochrones.bc(dr13['FPARAM'][j,0],filt='h',agerange=[10,10.1])-(5*np.log10(dist)-5)-4.74))*astropy.constants.L_sun.cgs.value
logg=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*dr13['FPARAM'][j,0]**4/lum)
plots.plotc(ax[0,1],dr13['FPARAM'][j,3]*0+mh,dr13['FPARAM'][j,1]-logg,dr13['FPARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'DR13 raw'])
plots.plotc(ax[1,1],dr13['PARAM'][j,3]*0+mh,dr13['PARAM'][j,1]-logg,dr13['PARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'DR13 cal'],xt='[M/H]')
#l30i
j=apselect.clustmember(l30i,cluster,raw=True)
lum=10.**(-0.4*(l30i['H'][j]+isochrones.bc(l30i['FPARAM'][j,0],filt='h',agerange=[10,10.1])-(5*np.log10(dist)-5)-4.74))*astropy.constants.L_sun.cgs.value
logg=np.log10(4*np.pi*astropy.constants.G.cgs.value*mass*astropy.constants.M_sun.cgs.value*astropy.constants.sigma_sb.cgs.value*l30i['FPARAM'][j,0]**4/lum)
plots.plotc(ax[0,2],l30i['FPARAM'][j,3]*0+mh,l30i['FPARAM'][j,1]-logg,l30i['FPARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'l30i raw'])
plots.plotc(ax[1,2],l30i['PARAM'][j,3]*0+mh,l30i['PARAM'][j,1]-logg,l30i['PARAM'][j,0],xr=[-2.75,-1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'l30i cal'],xt='[M/H]')
plt.show()
pdb.set_trace()
# plots vs asterseismic
fig,ax =plots.multi(3,2,wspace=0.001,hspace=0.001)
i1,i2=match.match(dr13['APOGEE_ID'],apokasc['2MASS_ID'])
plots.plotc(ax[0,0],dr13['FPARAM'][i1,3],dr13['FPARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],dr13['FPARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],xt='[M/H]',label=[0.1,0.9,'DR13 raw'])
plots.plotc(ax[1,0],dr13['PARAM'][i1,3],dr13['PARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],dr13['PARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],xt='[M/H]',yt='ASPCAP-seismic log g',label=[0.1,0.9,'DR13 cal'])
i1,i2=match.match(dr12['APOGEE_ID'],apokasc['2MASS_ID'])
plots.plotc(ax[0,1],dr12['FPARAM'][i1,3],dr12['FPARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],dr12['FPARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'DR12 raw'])
plots.plotc(ax[1,1],dr12['PARAM'][i1,3],dr12['PARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],dr12['PARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],xt='[M/H]',label=[0.1,0.9,'DR12 cal'])
i1,i2=match.match(l30i['APOGEE_ID'],apokasc['2MASS_ID'])
plots.plotc(ax[0,2],l30i['FPARAM'][i1,3],l30i['FPARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],l30i['FPARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],label=[0.1,0.9,'l30i raw'])
plots.plotc(ax[1,2],l30i['PARAM'][i1,3],l30i['PARAM'][i1,1]-apokasc['LOGG_SYD_SCALING'][i2],l30i['PARAM'][i1,0],xr=[-2.5,1.],yr=[-2,2],zr=[3500,5500],xt='[M/H]',label=[0.1,0.9,'l30i cal'])
plt.show()
def kurucz_marcs(logg='LOGG_SYD_SCALING',apokasc='APOKASC_cat_v3.6.0.fits') :
'''
asteroseismic log g comparisons for Kurucz and MARCS results
'''
# read APOKASC
apokasc=fits.open('APOKASC_cat_v3.6.0.fits')[1].data
#j=np.where((apokasc['TEFF_FIT'] < 4000) & (apokasc[logg] > -500))[0]
#j=np.where((apokasc['CONS_EVSTATES'] == 'RGB'))[0]
#apokasc=apokasc[j]
# read DR13 and l30i
dr13load=apload.ApLoad(dr='dr13')
dr13=dr13load.allStar()[1].data
dr13load.aspcap='l30i'
dr13load.results='l30i'
l30i=dr13load.allStar()[1].data
# define axes
fig=plt.figure()
ax1=fig.add_subplot(211)
ax2=fig.add_subplot(212)
#ax3=fig.add_subplot(223)
#ax4=fig.add_subplot(224)
# match l30i with APOKASC
i1,i2=match.match(l30i['APOGEE_ID'],apokasc['2MASS_ID'])
warn=np.where(l30i['ASPCAPFLAG'][i1] & bitmask.aspcapflagval('ATMOS_HOLE_WARN'))[0]
bad=np.where(l30i['ASPCAPFLAG'][i1] & bitmask.aspcapflagval('ATMOS_HOLE_BAD'))[0]
rgb=np.where(apokasc['CONS_EVSTATES'][i2] == 'RGB')
import h5py
import pickle
import numpy as np
def load_weights():
fff = h5py.File('Mybase/mask_rcnn_coco.h5','r') # open the h5 file
#print(list(f.keys()))
mydict = {}
mydict['global_step:0'] = 1000
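# The repeated pattern below copies each Keras layer into the corresponding TF variable names:
# b is the conv kernel, and h folds the conv bias (c), batch-norm moving mean (f), moving
# variance (g), gamma (e) and beta (d) into a single offset stored as the new beta.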
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2b']
a = dset['res4a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2b']
a = dset['bn4a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2c']
a = dset['res4a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2c']
a = dset['bn4a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res4b_branch2a']
a = dset['res4b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2a']
a = dset['bn4b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2b']
a = dset['res4b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2b']
a = dset['bn4b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2c']
a = dset['res4b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2c']
a = dset['bn4b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4c_branch2a']
a = dset['res4c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2a']
a = dset['bn4c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2b']
a = dset['res4c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2b']
a = dset['bn4c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2c']
a = dset['res4c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2c']
a = dset['bn4c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4d_branch2a']
a = dset['res4d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2a']
a = dset['bn4d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2b']
a = dset['res4d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2b']
a = dset['bn4d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2c']
a = dset['res4d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2c']
a = dset['bn4d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4e_branch2a']
a = dset['res4e_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2a']
a = dset['bn4e_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2b']
a = dset['res4e_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2b']
a = dset['bn4e_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2c']
a = dset['res4e_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2c']
a = dset['bn4e_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4f_branch2a']
a = dset['res4f_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2a']
a = dset['bn4f_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2b']
a = dset['res4f_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2b']
a = dset['bn4f_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2a']
a = dset['bn4g_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2b']
a = dset['res4g_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2b']
a = dset['bn4g_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4g_branch2c']
a = dset['res4g_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4g_branch2c']
a = dset['bn4g_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4h_branch2a']
a = dset['res4h_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2a']
a = dset['bn4h_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2b']
a = dset['res4h_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2b']
a = dset['bn4h_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4h_branch2c']
a = dset['res4h_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4h_branch2c']
a = dset['bn4h_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_7/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4i_branch2a']
a = dset['res4i_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2a']
a = dset['bn4i_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2b']
a = dset['res4i_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2b']
a = dset['bn4i_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4i_branch2c']
a = dset['res4i_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4i_branch2c']
a = dset['bn4i_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_8/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4j_branch2a']
a = dset['res4j_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2a']
a = dset['bn4j_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2b']
a = dset['res4j_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2b']
a = dset['bn4j_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4j_branch2c']
a = dset['res4j_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4j_branch2c']
a = dset['bn4j_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_9/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4k_branch2a']
a = dset['res4k_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2a']
a = dset['bn4k_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2b']
a = dset['res4k_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2b']
a = dset['bn4k_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4k_branch2c']
a = dset['res4k_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4k_branch2c']
a = dset['bn4k_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_10/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4l_branch2a']
a = dset['res4l_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2a']
a = dset['bn4l_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2b']
a = dset['res4l_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2b']
a = dset['bn4l_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4l_branch2c']
a = dset['res4l_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4l_branch2c']
a = dset['bn4l_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_11/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4m_branch2a']
a = dset['res4m_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2a']
a = dset['bn4m_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2b']
a = dset['res4m_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2b']
a = dset['bn4m_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4m_branch2c']
a = dset['res4m_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4m_branch2c']
a = dset['bn4m_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_12/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4n_branch2a']
a = dset['res4n_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2a']
a = dset['bn4n_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2b']
a = dset['res4n_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4n_branch2b']
a = dset['bn4n_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_13/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4n_branch2c']
a = dset['res4n_branch2c']
    b = np.array(a['kernel:0'], dtype=np.float32)
"""timeresp_test.py - test time response functions"""
from copy import copy
from distutils.version import StrictVersion
import numpy as np
import pytest
import scipy as sp
import control as ct
from control import StateSpace, TransferFunction, c2d, isctime, ss2tf, tf2ss
from control.exception import slycot_check
from control.tests.conftest import slycotonly
from control.timeresp import (_default_time_vector, _ideal_tfinal_and_dt,
forced_response, impulse_response,
initial_response, step_info, step_response)
class TSys:
"""Struct of test system"""
def __init__(self, sys=None, call_kwargs=None):
self.sys = sys
self.kwargs = call_kwargs if call_kwargs else {}
def __repr__(self):
"""Show system when debugging"""
return self.sys.__repr__()
class TestTimeresp:
@pytest.fixture
def tsystem(self, request):
"""Define some test systems"""
"""continuous"""
A = np.array([[1., -2.], [3., -4.]])
B = np.array([[5.], [7.]])
C = np.array([[6., 8.]])
D = np.array([[9.]])
siso_ss1 = TSys(StateSpace(A, B, C, D, 0))
siso_ss1.t = np.linspace(0, 1, 10)
siso_ss1.ystep = np.array([9., 17.6457, 24.7072, 30.4855, 35.2234,
39.1165, 42.3227, 44.9694, 47.1599,
48.9776])
siso_ss1.X0 = np.array([[.5], [1.]])
siso_ss1.yinitial = np.array([11., 8.1494, 5.9361, 4.2258, 2.9118,
1.9092, 1.1508, 0.5833, 0.1645, -0.1391])
ss1 = siso_ss1.sys
"""D=0, continuous"""
siso_ss2 = TSys(StateSpace(ss1.A, ss1.B, ss1.C, 0, 0))
siso_ss2.t = siso_ss1.t
siso_ss2.ystep = siso_ss1.ystep - 9
        siso_ss2.yinitial = siso_ss1.yinitial - 9
siso_ss2.yimpulse = np.array([86., 70.1808, 57.3753, 46.9975, 38.5766,
31.7344, 26.1668, 21.6292, 17.9245,
14.8945])
"""System with unspecified timebase"""
siso_ss2_dtnone = TSys(StateSpace(ss1.A, ss1.B, ss1.C, 0, None))
siso_ss2_dtnone.t = np.arange(0, 10, 1.)
siso_ss2_dtnone.ystep = np.array([0., 86., -72., 230., -360., 806.,
-1512., 3110., -6120., 12326.])
siso_tf1 = TSys(TransferFunction([1], [1, 2, 1], 0))
siso_tf2 = copy(siso_ss1)
siso_tf2.sys = ss2tf(siso_ss1.sys)
"""MIMO system, contains ``siso_ss1`` twice"""
mimo_ss1 = copy(siso_ss1)
A = np.zeros((4, 4))
A[:2, :2] = siso_ss1.sys.A
A[2:, 2:] = siso_ss1.sys.A
B = np.zeros((4, 2))
B[:2, :1] = siso_ss1.sys.B
B[2:, 1:] = siso_ss1.sys.B
C = np.zeros((2, 4))
C[:1, :2] = siso_ss1.sys.C
C[1:, 2:] = siso_ss1.sys.C
D = np.zeros((2, 2))
D[:1, :1] = siso_ss1.sys.D
D[1:, 1:] = siso_ss1.sys.D
mimo_ss1.sys = StateSpace(A, B, C, D)
"""MIMO system, contains ``siso_ss2`` twice"""
mimo_ss2 = copy(siso_ss2)
A = np.zeros((4, 4))
A[:2, :2] = siso_ss2.sys.A
A[2:, 2:] = siso_ss2.sys.A
B = np.zeros((4, 2))
B[:2, :1] = siso_ss2.sys.B
B[2:, 1:] = siso_ss2.sys.B
C = np.zeros((2, 4))
C[:1, :2] = siso_ss2.sys.C
C[1:, 2:] = siso_ss2.sys.C
D = np.zeros((2, 2))
mimo_ss2.sys = StateSpace(A, B, C, D, 0)
"""discrete"""
siso_dtf0 = TSys(TransferFunction([1.], [1., 0.], 1.))
siso_dtf0.t = np.arange(4)
siso_dtf0.yimpulse = [0., 1., 0., 0.]
siso_dtf1 = TSys(TransferFunction([1], [1, 1, 0.25], True))
siso_dtf1.t = np.arange(0, 5, 1)
siso_dtf1.ystep = np.array([0. , 0. , 1. , 0. , 0.75])
siso_dtf2 = TSys(TransferFunction([1], [1, 1, 0.25], 0.2))
siso_dtf2.t = np.arange(0, 5, 0.2)
siso_dtf2.ystep = np.array(
[0. , 0. , 1. , 0. , 0.75 , 0.25 ,
0.5625, 0.375 , 0.4844, 0.4219, 0.457 , 0.4375,
0.4482, 0.4424, 0.4456, 0.4438, 0.4448, 0.4443,
0.4445, 0.4444, 0.4445, 0.4444, 0.4445, 0.4444,
0.4444])
"""Time step which leads to rounding errors for time vector length"""
num = [-0.10966442, 0.12431949]
den = [1., -1.86789511, 0.88255018]
dt = 0.12493963338370018
siso_dtf3 = TSys(TransferFunction(num, den, dt))
siso_dtf3.t = np.linspace(0, 9*dt, 10)
siso_dtf3.ystep = np.array(
[ 0. , -0.1097, -0.1902, -0.2438, -0.2729,
-0.2799, -0.2674, -0.2377, -0.1934, -0.1368])
"""dtf1 converted statically, because Slycot and Scipy produce
        different realizations, which means different initial conditions."""
siso_dss1 = copy(siso_dtf1)
siso_dss1.sys = StateSpace([[-1., -0.25],
[ 1., 0.]],
[[1.],
[0.]],
[[0., 1.]],
[[0.]],
True)
siso_dss1.X0 = [0.5, 1.]
siso_dss1.yinitial = np.array([1., 0.5, -0.75, 0.625, -0.4375])
siso_dss2 = copy(siso_dtf2)
siso_dss2.sys = tf2ss(siso_dtf2.sys)
mimo_dss1 = TSys(StateSpace(ss1.A, ss1.B, ss1.C, ss1.D, True))
        mimo_dss1.t = np.arange(0, 5, 0.2)
#! /usr/bin/env python
import ray
import time
import rospy
import cv2
import tf
import os
import atexit
import json
import random
import signal
import subprocess
import sys
import traceback
import numpy as np
import random
from geometry_msgs.msg import PointStamped
import image_geometry
from gym import utils
from gym import spaces
from sensor_msgs.msg import JointState
sys.path.insert(0, '/home/arl/env/src')
import abb_rob_env
def launch_gazebo():
ros_port = random.randint(10000, 14000)
gazebo_port = ros_port + 1
print("Initializing new gazebo instance...")
# Create a new server process and start the client.
gazebo_process = subprocess.Popen(['/home/arl/env/src/abb_irb120_gazebo/gazeboSimulation0.sh', str(gazebo_port), str(ros_port)],
preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
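    # preexec_fn=os.setsid starts the simulator in its own session/process group,
    # so the whole group can later be terminated through the PGID captured below.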
gazebo_pgid = os.getpgid(gazebo_process.pid)
print("Launched Gazebo Process with PGID " + str(gazebo_pgid))
return ros_port, gazebo_port, gazebo_pgid
def launch_moveit(ros_port, gazebo_port):
print("Initializing new moveit instance...")
moveit_process = subprocess.Popen(
['/home/arl/env/src/abb_irb120_gazebo/moveitSimulation0.sh', str(gazebo_port), str(ros_port)],
preexec_fn=os.setsid, stdout=open(os.devnull, "w"))
moveit_pgid = os.getpgid(moveit_process.pid)
print("Launched MoveIt Process with PGID " + str(moveit_pgid))
return moveit_pgid
class ABBReachEnv(abb_rob_env.Abbenv, utils.EzPickle):
def __init__(self, env_config):
#print("Entered Reach Env")
        # The last element of the pose encodes the sine of an angle between 90 and -90 degrees.
        # TODO: check the limits and normalize it, plus the sine encoding of the rotation limit.
XYZlim = 0.8 # limits for states, don't use for goals
X_max = 1.1 # limit for both goals in x
Ylim = 0.50 # limit for both goals in y
Zlim = 1.0 # upper limit for both goals in z
NZlim = -0.1 # lower limit for both goals in z
self.observation_space = spaces.Dict({'achieved': spaces.Box(low=np.tile(np.array([-0.3, -Ylim, NZlim, -0.1, -0.1]),(5,1)),
high=np.tile(np.array([X_max, Ylim, Zlim, 1.1, 1.1]),(5,1)), dtype=np.float32),
'desired': spaces.Box(low=np.array([-0.3, -Ylim, NZlim, -0.1, -0.1]),
high=np.array([X_max, Ylim, Zlim, 1.1, 1.1]), dtype=np.float32),
'image': spaces.Box(low=0, high=1.1, shape=(234, 234, 4), dtype=np.float32),
'states': spaces.Box(low=np.array([-XYZlim, -XYZlim, 0, -1.1, -0.1]),
high=np.array([XYZlim, XYZlim, XYZlim, 1.1, 1.1]), dtype=np.float32),
})
self.action_space = spaces.Box(low=np.array([-1, -1, -1, -1, -1]),
high=np.array([1, 1, 1, 1, 1]), dtype=np.float32)
if env_config["worker_number"] > 0:
self.index = env_config["worker_number"]
self.ros_port, self.gazebo_port, self.gazebo_pgid = launch_gazebo()
file = open("gazebo_process_" + str(self.index) + ".txt", "w")
file.write(str(self.gazebo_pgid))
file.close()
file = open("ros_port_" + str(self.index) + ".txt", "w")
file.write(str(self.ros_port))
file.close()
#print('PORT NUMBER',self.ros_port)
os.environ['ROS_MASTER_URI'] = "http://localhost:" + str(self.ros_port) + '/'
os.environ['GAZEBO_MASTER_URI'] = "http://localhost:" + str(self.gazebo_port) + '/'
rospy.wait_for_service('/gazebo/get_world_properties')
self.moveit_pgid = launch_moveit(self.ros_port, self.gazebo_port)
file = open("moveit_process_" + str(self.index) + ".txt", "w")
file.write(str(self.moveit_pgid))
file.close()
time.sleep(5)
#print("Entered Reach Env")
rospy.init_node('env', anonymous=True)
self.cam_model = image_geometry.PinholeCameraModel()
self.get_params()
# intializing robot env
abb_rob_env.Abbenv.__init__(self)
utils.EzPickle.__init__(self)
# calling setup env.
#print("Call env setup")
        self.pose = np.array([0.0, 0.0, 0.0])
self._env_setup(initial_qpos=self.init_pos)
#print("Exit Reach Env")
def get_params(self):
# get configuration parameters
"""
self.n_actions = rospy.get_param('/fetch/n_actions')
self.has_object = rospy.get_param('/fetch/has_object')
self.block_gripper = rospy.get_param('/fetch/block_gripper')
self.n_substeps = rospy.get_param('/fetch/n_substeps')
self.gripper_extra_height = rospy.get_param('/fetch/gripper_extra_height')
self.target_in_the_air = rospy.get_param('/fetch/target_in_the_air')
self.target_offset = rospy.get_param('/fetch/target_offset')
self.obj_range = rospy.get_param('/fetch/obj_range')
self.target_range = rospy.get_param('/fetch/target_range')
self.distance_threshold = rospy.get_param('/fetch/distance_threshold')
self.init_pos = rospy.get_param('/fetch/init_pos')
self.reward_type = rospy.get_param('/fetch/reward_type')
"""
        # parameters from has_object through target_range are unused, except target_in_the_air
self.n_actions = 5
self.has_object = True
self.block_gripper = False
self.n_substeps = 20
self.gripper_extra_height = 0.2
self.target_in_the_air = True
self.target_offset = 0.0
self.obj_range = 0.5
self.target_range = 0.15
self.distance_threshold = 0.01
self.reward_type = "sparse"
self.init_pos = {
'joint0': 0.0,
'joint1': 0.0,
'joint2': 0.0,
'joint3': 0.0,
'joint4': 0.0,
'joint5': 0.0
}
def _set_action(self, action):
# Take action
assert action.shape == (self.n_actions,)
action_w_gripper_state = action.copy() # ensure that we don't change the action outside of this scope
self.pose += np.array(action_w_gripper_state[0:3]) * 0.05
#print(self.pose)
self.pose = np.around(self.pose, decimals=5)
action_w_gripper_state = np.around(action_w_gripper_state, decimals=5)
self.set_trajectory_ee([self.pose[0], self.pose[1], self.pose[2], np.arcsin(action_w_gripper_state[3]), action_w_gripper_state[4]])
def _get_obs(self):
###################################################################################################
#getting the image for the current observation the image should be a numpy array encoding a RGB-D stack
image = self.get_stacked_image()
###################################################################################################
# The pose of the end effector consists of the end effector 3D translation and rotation about the Z axis
        # in addition to an indication of the aperture of the gripper and command success
grip_pose, grip_state = self.get_ee_pose()
grip_pos_array = np.array([grip_pose.pose.position.x, grip_pose.pose.position.y, grip_pose.pose.position.z])
grip_rpy = self.get_ee_rpy()
grip_rot_array = np.array([np.sin(grip_rpy.y)])
self.pose = np.array([grip_pose.pose.position.x, grip_pose.pose.position.y, grip_pose.pose.position.z])
#Check whether to add success or if the gripper is opened or closed only
self.gripper_success_only = False
if self.gripper_success_only:
            gripper_state = np.array([grip_state[1]]) # is the task reached? (when should the reach target change?)
else:
gripper_state = np.array([grip_state[0]]) #is gripper open?
obs = np.concatenate([ grip_pos_array, grip_rot_array, gripper_state])
#Get the object poses from the simulator and sample achieved goals
object_pos = self.get_model_states()
achieved_goal = self._sample_achieved_goal(object_pos)
return {
'image': image/255,
'states': obs.copy(),
'desired': self.goal.copy(),
'achieved': achieved_goal.copy(),
}
    def _is_done(self, observations):
        #print("computing goal distance in _is_done")
        d = self.goal_distance(observations['achieved'], self.goal) # won't this keep sampling a random achieved object here rather than the desired goal?
        done = (d < self.distance_threshold).astype(np.float32)
        return bool(done)
    def _compute_reward(self, observations, done): # Amora says this is OK
        #print("computing goal distance in _compute_reward")
d = self.goal_distance(observations['achieved'], self.goal)
#if self.reward_type == 'sparse':
# #print(d < self.distance_threshold)
# if d < self.distance_threshold:
# return (d < self.distance_threshold).astype(np.float32)
# else:
# #print(-0.05 * ((d > self.distance_threshold).astype(np.float32)))
# return -0.05 * ((d > self.distance_threshold).astype(np.float32))
#else:
# assert False
# return -d
if self.reward_type == 'sparse':
            c = np.array(d < self.distance_threshold) # can be squeezed if needed - check
            c = np.where(c, 1.0, -0.05)
#print("reward = "+str(c))
return float(c)
else:
#print("reward = "+str(-d))
return -d
def compute_reward(self, achieved_goal, goal, info):
#print("ana f distance l rewardzzzz")
# Compute distance between goal and the achieved goal.
d = self.goal_distance(achieved_goal, goal)
if self.reward_type == 'sparse':
            c = np.array(d < self.distance_threshold) # can be squeezed if needed - check
            c = np.where(c, 1.0, -0.05)
return c
else:
return -d
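    # Illustrative usage sketch (not from the original source): this vectorised form of
    # compute_reward matches the signature expected by HER-style goal relabelling, where
    # achieved/desired goals arrive in batches.  Assuming goal_distance returns per-row
    # distances:
    #   achieved = np.zeros((4, 5)); desired = np.ones((4, 5))
    #   rewards = env.compute_reward(achieved, desired, info=None)   # shape (4,)
    # With the sparse setting each entry is 1.0 when within distance_threshold, else -0.05.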
def _set_init_pose(self):
"""Sets the Robot in its init pose"""
self.gazebo.unpauseSim()
gripper_target = | np.array([0.498, 0.005, 0.431]) | numpy.array |
""" Testing group-level finite difference. """
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from six import PY3
from openmdao.api import Problem, Group, Component, IndepVarComp, ExecComp
from openmdao.test.simple_comps import SimpleComp, SimpleArrayComp
from openmdao.test.util import assert_rel_error
if PY3:
def py3fix(s):
return s.replace('<type', '<class')
else:
def py3fix(s):
return s
class TestSrcIndices(unittest.TestCase):
def test_src_indices(self):
size = 10
root = Group()
root.add('P1', IndepVarComp('x', np.zeros(size)))
root.add('C1', ExecComp('y = x * 2.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.add('C2', ExecComp('y = x * 3.', y=np.zeros(size//2), x=np.zeros(size//2)))
root.connect('P1.x', "C1.x", src_indices=list(range(size//2)))
root.connect('P1.x', "C2.x", src_indices=list(range(size//2, size)))
prob = Problem(root)
prob.setup(check=False)
root.P1.unknowns['x'][0:size//2] += 1.0
root.P1.unknowns['x'][size//2:size] -= 1.0
prob.run()
assert_rel_error(self, root.C1.params['x'], np.ones(size//2), 0.0001)
assert_rel_error(self, root.C2.params['x'], -np.ones(size//2), 0.0001)
def test_array_to_scalar(self):
root = Group()
root.add('P1', IndepVarComp('x', np.array([2., 3.])))
root.add('C1', SimpleComp())
root.add('C2', ExecComp('y = x * 3.', y=0., x=0.))
root.connect('P1.x', 'C1.x', src_indices=[0,])
root.connect('P1.x', 'C2.x', src_indices=[1,])
prob = Problem(root)
prob.setup(check=False)
prob.run()
self.assertAlmostEqual(root.C1.params['x'], 2.)
self.assertAlmostEqual(root.C2.params['x'], 3.)
def test_subarray_to_promoted_var(self):
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp())
G2 = G.add('G2', Group())
A2 = G2.add('A2', SimpleArrayComp())
root.connect('P.x', 'G.A.x', src_indices=[0,1])
root.connect('P.x', 'C.x', src_indices=[2,])
root.connect('P.x', 'G.G2.A2.x', src_indices=[3, 4])
prob = Problem(root)
prob.setup(check=False)
prob.run()
assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
self.assertAlmostEqual(root.C.params['x'], 3.)
assert_rel_error(self, root.G.G2.A2.params['x'], np.array([4., 5.]), 0.0001)
# now try the same thing with promoted var
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])
G2 = G.add('G2', Group())
A2 = G2.add('A2', SimpleArrayComp(), promotes=['x', 'y'])
root.connect('P.x', 'G.x', src_indices=[0,1])
root.connect('P.x', 'C.x', src_indices=[2,])
root.connect('P.x', 'G.G2.x', src_indices=[3, 4])
prob = Problem(root)
prob.setup(check=False)
prob.run()
assert_rel_error(self, root.G.A.params['x'], np.array([1., 2.]), 0.0001)
self.assertAlmostEqual(root.C.params['x'], 3.)
assert_rel_error(self, root.G.G2.A2.params['x'], np.array([4., 5.]), 0.0001)
def test_src_indices_connect_error(self):
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp())
root.connect('P.x', 'G.A.x', src_indices=[0])
root.connect('P.x', 'C.x', src_indices=[2,])
prob = Problem(root)
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected = py3fix("Size 1 of the indexed sub-part of source 'P.x' "
"must be the same as size 2 of target 'G.A.x'.")
self.assertTrue(expected in str(cm.exception))
# now try the same thing with promoted var
root = Group()
P = root.add('P', IndepVarComp('x', np.array([1., 2., 3., 4., 5.])))
G = root.add('G', Group())
C = root.add('C', SimpleComp())
A = G.add('A', SimpleArrayComp(), promotes=['x', 'y'])
root.connect('P.x', 'G.x', src_indices=[0,1,2])
root.connect('P.x', 'C.x', src_indices=[2,])
prob = Problem(root)
with self.assertRaises(Exception) as cm:
prob.setup(check=False)
expected = py3fix("Size 3 of the indexed sub-part of source 'P.x' "
"must be the same as size 2 of target 'G.A.x' (G.x).")
self.assertTrue(expected in str(cm.exception))
def test_inner_connection(self):
class Squarer(Component):
def __init__(self, size):
super(Squarer, self).__init__()
self.add_param(name='input:x', val=np.zeros(size), desc='x')
self.add_output(name='output:x2', val= | np.zeros(size) | numpy.zeros |
import numpy as np
from sfsimodels.models.abstract_models import PhysicalObject
from sfsimodels.models.systems import TwoDSystem
from sfsimodels.functions import interp_left, interp2d, interp3d
from .fns import remove_close_items, build_ele2_node_array
import hashlib
def sort_slopes(sds):
"""Sort slopes from bottom to top then right to left"""
sds = np.array(sds)
scores = sds[:, 0, 1] + sds[:, 1, 1] * 1e6
inds = np.argsort(scores)
return sds[inds]
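# Illustrative note (not part of the original module): the composite score weights the
# second y-coordinate of each segment by 1e6, so np.argsort orders the segments primarily
# by height and only then by their second x-coordinate, e.g.
#   sds = [[[0., 1.], [5., 5.]], [[0., 1.], [0., 0.]]]
#   sort_slopes(sds)[0]   # the segment lying at y=0 comes first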
def adjust_slope_points_for_removals(sds, x, removed_y, retained_y):
for sd in sds:
for i in range(2):
if sd[0][i] == x and sd[1][i] == removed_y:
sd[1][i] = retained_y
def adj_slope_by_layers(xm, ym, sgn=1):
"""
    Given mesh coordinates, adjust the mesh to match the slope by adjusting each layer;
    the bottom-left and top-right coords of the mesh define the slope.
    Parameters
    ----------
    xm : array_like of mesh x-coordinates
    ym : array_like of mesh y-coordinates
    sgn : int, +1 or -1; the mesh is mirrored when -1
Returns
-------
"""
# TODO use centroid formula - and use o3plot to get ele-coords
ym = sgn * np.array(ym)
xm = sgn * np.array(xm)
if sgn == -1:
xm = xm[::-1]
ym = ym[::-1]
nh = len(ym[0]) - 1
# dy = min([(ym[0][-1] - ym[0][0]) / nh, (ym[-1][-1] - ym[-1][0]) / nh, 0.2])
dy1 = min([(ym[-1][-1] - ym[-1][0]) / nh])
dy0 = 0.2
y0s = ym[0][0] + np.arange(nh + 1) * dy0
y1s = ym[-1][-1] - np.arange(nh + 1) * dy1
y1s = y1s[::-1]
for i in range(nh + 1):
ym[:, i] = np.interp(xm[:, i], [xm[0][0], xm[-1][-1]], [y0s[i], y1s[i]])
xm[:, i] = xm[:, 0]
y_centres_at_xns = (ym[1:] + ym[:-1]) / 2
y_centres = (y_centres_at_xns[:, 1:] + y_centres_at_xns[:, :-1]) / 2
# get x-coordinates of centres of relevant elements
included_ele = []
dy_inds = len(ym[0, :]) - 1
for i in range(0, dy_inds):
# account for shift before assessing position of centroid
xcens = (xm[1:, i] + xm[:-1, i]) / 2 + 0.375 * (xm[1:, -1] - xm[:-1, -1])
y_surf_at_x_cens = np.interp(xcens, [xm[0][0], xm[-1][-1]], [ym[0][0], ym[-1][-1]])
inds = np.where(y_centres[:, i] < y_surf_at_x_cens)
if len(inds[0]):
included_ele.append(inds[0][0])
else:
included_ele.append(len(y_surf_at_x_cens))
included_ele.append(len(y_surf_at_x_cens))
new_xm = xm
new_ym = ym
for j in range(1, nh + 1):
new_ym[included_ele[0], j] += dy1
for i in range(1, dy_inds + 1):
x_ind_adj = included_ele[i - 1]
x_ind_adj_next = included_ele[i]
if x_ind_adj == x_ind_adj_next:
continue
# shift by half of the ele
dx = (xm[x_ind_adj + 1, i] - xm[x_ind_adj, i]) * 0.5
dxs = np.interp(xm[x_ind_adj:x_ind_adj_next, i], [xm[x_ind_adj, i], xm[x_ind_adj_next, i]], [dx, 0])
new_xm[x_ind_adj:x_ind_adj_next, i] = xm[x_ind_adj:x_ind_adj_next, i] + dxs
for j in range(i + 1, nh + 1):
new_ym[x_ind_adj_next, j] += dy1
if sgn == -1:
new_xm = new_xm[::-1]
new_ym = new_ym[::-1]
return new_xm * sgn, new_ym * sgn
def calc_centroid(xs, ys):
import numpy as np
x0 = np.array(xs)
y0 = np.array(ys)
x1 = np.roll(xs, 1, axis=-1)
y1 = np.roll(ys, 1, axis=-1)
a = x0 * y1 - x1 * y0
xc = np.sum((x0 + x1) * a, axis=-1)
yc = np.sum((y0 + y1) * a, axis=-1)
area = 0.5 * np.sum(a, axis=-1)
xc /= (6.0 * area)
yc /= (6.0 * area)
return xc, yc
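# Illustrative note (not part of the original module): calc_centroid implements the
# standard shoelace-based polygon centroid,
#   A  = 1/2 * sum_i (x_i * y_{i+1} - x_{i+1} * y_i)
#   Cx = 1/(6A) * sum_i (x_i + x_{i+1}) * (x_i * y_{i+1} - x_{i+1} * y_i)
#   Cy = 1/(6A) * sum_i (y_i + y_{i+1}) * (x_i * y_{i+1} - x_{i+1} * y_i)
# e.g. calc_centroid([0, 1, 1, 0], [0, 0, 1, 1]) gives (0.5, 0.5) for the unit square.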
def calc_mesh_centroids(fem):
x_inds = []
y_inds = []
if hasattr(fem.y_nodes[0], '__len__'): # can either have varying y-coordinates or single set
n_y = len(fem.y_nodes[0])
else:
n_y = 0
import numpy as np
for xx in range(len(fem.soil_grid)):
x_ele = [xx, xx + 1, xx + 1, xx]
x_inds += [x_ele for i in range(n_y - 1)]
for yy in range(len(fem.soil_grid[xx])):
y_ele = [yy, yy, yy + 1, yy + 1]
y_inds.append(y_ele)
n_eles = len(np.array(x_inds))
x_inds = np.array(x_inds).flatten()
y_inds = np.array(y_inds).flatten()
x0 = np.array(fem.x_nodes[x_inds, y_inds])
y0 = np.array(fem.y_nodes[x_inds, y_inds])
x0 = x0.reshape((n_eles, 4))
y0 = y0.reshape((n_eles, 4))
x1 = np.roll(x0, 1, axis=-1)
y1 = np.roll(y0, 1, axis=-1)
a = x0 * y1 - x1 * y0
xc = np.sum((x0 + x1) * a, axis=-1)
yc = np.sum((y0 + y1) * a, axis=-1)
area = 0.5 * np.sum(a, axis=-1)
xc /= (6.0 * area)
yc /= (6.0 * area)
return xc.reshape(len(fem.soil_grid), len(fem.soil_grid[0])), yc.reshape(len(fem.soil_grid), len(fem.soil_grid[0]))
class FiniteElementVary2DMeshConstructor(object): # maybe FiniteElementVertLine2DMesh
_soils = None
x_index_to_sp_index = None
_inactive_value = 1000000
def __init__(self, tds, dy_target, x_scale_pos=None, x_scale_vals=None, dp: int = None, fd_eles=0, auto_run=True,
use_3d_interp=False, smooth_surf=False, force_x2d=False, min_scale=0.5, max_scale=2.0,
allowable_slope=0.25, smooth_ratio=1.):
"""
        Builds a finite element mesh of a two-dimensional system
Parameters
----------
tds: TwoDSystem
A two dimensional system of models
dy_target: float
Target height of elements
x_scale_pos: array_like
x-positions used to provide scale factors for element widths
x_scale_vals: array_like
scale factors for element widths
dp: int
Number of decimal places
fd_eles: int
if =0 then elements corresponding to the foundation are removed, else provide element id
smooth_surf: bool
if true then changes in angle of the slope must be less than 90 degrees, builds VaryXY mesh
"""
self.min_scale = min_scale
self.max_scale = max_scale
self.allowable_slope = allowable_slope
self.smooth_ratio = smooth_ratio
assert isinstance(tds, TwoDSystem)
self.tds = tds
self.dy_target = dy_target
if x_scale_pos is None:
x_scale_pos = [0, tds.width]
if x_scale_vals is None:
x_scale_vals = [1., 1.]
self.x_scale_pos = np.array(x_scale_pos)
self.x_scale_vals = np.array(x_scale_vals)
self.dp = dp
self.xs = list(self.tds.x_sps)
self.smooth_surf = smooth_surf
self.xs.append(tds.width)
self.xs = np.array(self.xs)
inds = np.where(np.array(tds.x_surf) <= tds.width)
self.x_surf = np.array(tds.x_surf)[inds]
if tds.width not in self.x_surf:
self.x_surf = np.insert(self.x_surf, len(self.x_surf), tds.width)
self.y_surf = np.interp(self.x_surf, tds.x_surf, tds.y_surf)
self.y_surf_at_sps = np.interp(self.xs, tds.x_surf, tds.y_surf)
self._soils = []
self._soil_hashes = []
for i in range(len(self.tds.sps)):
for yy in range(1, self.tds.sps[i].n_layers + 1):
sl = self.tds.sps[i].layer(yy)
if sl.unique_hash not in self._soil_hashes:
self._soil_hashes.append(sl.unique_hash)
self._soils.append(sl)
self.y_surf_at_xcs = None
self.yd = None
self.xcs_sorted = None
self.sds = None
self.y_blocks = None
self.y_coords_at_xcs = None
self.x_nodes = None
self.y_nodes = None
self.x_nodes2d = None
self._femesh = None
if auto_run:
self.get_special_coords_and_slopes() # Step 1
self.set_init_y_blocks()
self.adjust_blocks_to_be_consistent_with_slopes()
self.trim_grid_to_target_dh()
self.build_req_y_node_positions()
self.set_x_nodes()
if use_3d_interp:
self.build_y_coords_grid_via_3d_interp()
else:
self.build_y_coords_grid_via_propagation()
if self.dp is not None:
self.set_to_decimal_places()
if smooth_surf:
self.adjust_for_smooth_surface()
self.set_soil_ids_to_vary_xy_grid()
elif force_x2d:
self.x_nodes2d = self.x_nodes[:, np.newaxis] * np.ones_like(self.y_nodes)
self.set_soil_ids_to_vary_xy_grid()
else:
self.set_soil_ids_to_vary_y_grid()
self.create_mesh()
if smooth_surf:
self.femesh.tidy_unused_mesh()
if not fd_eles:
self.exclude_fd_eles()
def get_special_coords_and_slopes(self):
"""Find the coordinates, layer boundaries and surface slopes that should be maintained in the FE mesh"""
fd_coords = []
x_off = 0.0
yd = {}
for i in range(len(self.x_surf)):
yd[self.x_surf[i]] = []
if self.tds.width not in yd:
yd[self.tds.width] = []
sds = [] # slope dict (stored left-to-right and bottom-to-top)
for i in range(len(self.tds.bds)):
x_bd = self.tds.x_bds[i]
bd = self.tds.bds[i]
fd_centre_x = x_bd + bd.x_fd
y_surf = np.interp(fd_centre_x, self.x_surf, self.y_surf)
if bd.fd.width > self.dy_target:
fd_coords.append(fd_centre_x)
x_left = fd_centre_x - bd.fd.width / 2
x_right = fd_centre_x + bd.fd.width / 2
if x_left not in yd:
yd[x_left] = []
yd[x_left] += [y_surf, -bd.fd.depth + y_surf]
if x_right not in yd:
yd[x_right] = []
yd[x_right] += [y_surf, -bd.fd.depth + y_surf]
sds.append([[x_left, x_right], [y_surf, y_surf]])
sds.append([[x_left, x_right], [-bd.fd.depth + y_surf, -bd.fd.depth + y_surf]])
for i in range(len(self.tds.sps)):
x_curr = self.tds.x_sps[i]
if x_curr > self.tds.width:
continue
if i == len(self.tds.sps) - 1:
x_next = self.tds.width
else:
x_next = self.tds.x_sps[i + 1] - x_off
# get important x-coordinates that are between two soil profiles
if x_curr not in yd:
yd[x_curr] = []
if x_next not in yd and x_next < self.tds.width:
yd[x_next] = []
x_coords = np.array(list(yd))
inds = np.where((x_coords >= x_curr) & (x_coords <= x_next))
xs = np.sort(x_coords[inds])
y_surf_at_xs = np.interp(xs, self.x_surf, self.y_surf)
y_curr_surf = y_surf_at_xs[0]
# Depths from defined soil profile
int_yy = []
angles = []
for yy in range(1, self.tds.sps[i].n_layers + 1):
# if self.tds.sps[i].layer_depth(yy) >= 0:
y = -self.tds.sps[i].layer_depth(yy) + y_curr_surf
if -y < self.tds.height:
int_yy.append(y)
angles.append(self.tds.sps[i].x_angles[yy - 1])
angles = np.array(angles)
if xs[0] not in yd:
yd[xs[0]] = []
for j in range(len(xs) - 1):
x0 = xs[j]
x_next = xs[j + 1]
if x_next not in yd:
yd[x_next] = []
x0_diff = x0 - x_curr
xn_diff = x_next - x_curr
if y_surf_at_xs[j] not in yd[x0]:
yd[x0].append(y_surf_at_xs[j])
if y_surf_at_xs[j + 1] not in yd[x_next]:
yd[x_next].append(y_surf_at_xs[j + 1])
for k in range(len(int_yy)):
if angles[k] is None or np.isnan(angles[k]):
continue
y_curr = int_yy[k] + angles[k] * x0_diff
if y_curr < y_surf_at_xs[j] and y_curr not in yd[x0]:
yd[x0].append(y_curr)
y_next = int_yy[k] + angles[k] * xn_diff
if y_next < y_surf_at_xs[j + 1] and y_next not in yd[x_next]:
yd[x_next].append(y_next)
if y_curr <= y_surf_at_xs[j] and y_next <= y_surf_at_xs[j + 1]:
sds.append([[x0, x_next], [y_curr, y_next]])
for x in yd:
yd[x].append(-self.tds.height)
yd[x].append(np.interp(x, self.x_surf, self.y_surf))
yd[x] = list(set(yd[x]))
yd[x].sort()
xcs = list(yd)
xcs.sort()
xcs = np.array(xcs)
for i in range(len(xcs) - 1):
xs = np.array([xcs[i], xcs[i + 1]])
slope = [list(xs), list(np.interp(xs, self.x_surf, self.y_surf))]
if abs(slope[1][1] - slope[1][0]) / (slope[0][1] - slope[0][0]) > 0.8:
continue
if slope not in sds:
sds.append(slope)
y_surf_max = max(self.y_surf)
# remove coordinates that are too close
min_y = self.dy_target * self.min_scale
tol = self.dy_target * self.min_scale
for x in yd:
yd[x], pairs = remove_close_items(yd[x], tol=tol)
for pair in pairs:
adjust_slope_points_for_removals(sds, x, pair[0], pair[1])
self.y_surf_at_xcs = {}
for x in yd:
self.y_surf_at_xcs[x] = yd[x][-1]
if y_surf_max not in yd[x] and abs(y_surf_max - max(yd[x])) > tol:
yd[x] = np.insert(yd[x], len(yd[x]), y_surf_max)
yd[x] = np.array(yd[x])
self.yd = yd
x_act = list(self.yd)
x_act.sort()
self.xcs_sorted = np.array(x_act)
self.sds = sort_slopes(sds)
def set_init_y_blocks(self):
"""For each significant vertical line, assign initial number of elements between each special y-coordinate"""
xcs = self.xcs_sorted
y_steps = []
y_blocks = {}
# Step 1: Define an initial set of y_node coordinates at each x-special-position
h_target = self.dy_target
yd_init_inds = []
for i in range(len(xcs)):
xc0 = xcs[i]
y_blocks[xc0] = []
y_steps.append([])
yd_init_inds.append([0])
for j in range(1, len(self.yd[xc0])):
h_diff = -(self.yd[xc0][j - 1] - self.yd[xc0][j])
n_blocks = int(np.round(h_diff / h_target))
if n_blocks == 0:
n_blocks = 1
y_blocks[xc0].append(n_blocks)
n_blocks = [sum(y_blocks[xcs]) for xcs in y_blocks]
n_max = max(n_blocks)
# Step 2: make sure that each column has same number of temp y positions
# - first check if n_blocks less than maximum,
# - if less then add extra elements to the largest thickness
for i in range(len(xcs)):
xc0 = xcs[i]
if len(y_steps[i]) < n_max:
n_extra = n_max - n_blocks[i] # number of blocks to add
h_diffs = np.diff(self.yd[xc0]) # thickness of each zone
for nn in range(n_extra):
dh_options = h_diffs / (np.array(y_blocks[xc0]) + 1)
# index of the zone with thickest average element, where new element will be added
ind_max = np.argmax(dh_options)
y_blocks[xc0][ind_max] += 1
self.y_blocks = y_blocks
def adjust_blocks_to_be_consistent_with_slopes(self):
"""Change the number of elements between special y-coords to try to maintain defined slopes"""
min_dh = self.min_scale * self.dy_target
max_dh = self.max_scale * self.dy_target
xcs = list(self.yd)
xcs.sort()
xcs = np.array(xcs)
yd_list = [self.yd[xc] for xc in xcs]
# yd_list = list(self.yd.values())
# Step 3: For each defined slope, check that the grid is consistent with the slope
# - cycle through moving left to right and bot to top
# - if not consistent then change thickness of elements in zones above and below on right side.
mdirs = [1, -1] # TODO: alternative between forward and reverse add
dd = 0
mdir = mdirs[dd]
old_hash = ''
for pp in range(100):
sds = self.sds[::mdir]
csum_y_blocks = [np.cumsum(self.y_blocks[xcs]) for xcs in self.y_blocks]
fblocks = np.array([j for i in csum_y_blocks for j in i], dtype=int)
new_hash = hashlib.md5(fblocks).hexdigest()
if new_hash == old_hash:
break
old_hash = new_hash
for qq, sd in enumerate(sds):
csum_y_blocks = [np.cumsum(self.y_blocks[xcs]) for xcs in self.y_blocks]
if mdir == 1:
x0 = sd[0][0]
x1 = sd[0][1]
y0 = sd[1][0]
y1 = sd[1][1]
else:
x0 = sd[0][1]
x1 = sd[0][0]
y0 = sd[1][1]
y1 = sd[1][0]
ind_x0 = int(np.argmin(abs(xcs - x0)))
ind_x1 = int(np.argmin(abs(xcs - x1)))
ind_y0 = int(np.argmin(abs(np.array(yd_list[ind_x0]) - y0)))
ind_y1 = int(np.argmin(abs(np.array(yd_list[ind_x1]) - y1)))
x1_c = xcs[ind_x1]
y1_c = yd_list[ind_x1][ind_y1]
nb0 = csum_y_blocks[ind_x0][ind_y0 - 1]
nb1 = csum_y_blocks[ind_x1][ind_y1 - 1]
sgn = int(np.sign(y1 - y0))
dh_dzone = y1 - y0
slope = dh_dzone / (x1 - x0)
if abs(slope) < self.allowable_slope and nb0 == nb1:
continue
if abs(slope) > self.allowable_slope and self.smooth_surf: # TODO: and on surface and n1 - n0 sign is same
y_surf0 = np.interp(x0, self.x_surf, self.y_surf)
y_surf1 = np.interp(x1, self.x_surf, self.y_surf)
if np.isclose(y_surf0, y0, atol=self.dy_target*0.1) and np.isclose(y_surf1, y1, atol=self.dy_target*0.1):
                        if nb1 >= nb0 and slope > 0:
                            continue
                        if nb1 <= nb0 and slope < 0:
                            continue
diff_nb = nb1 - nb0
y1_below = yd_list[ind_x1][ind_y1 - 1]
if y1_c == self.y_surf_at_xcs[x1_c]: # surface
y1_above = None
try:
x_next = xcs[ind_x1 + 1]
y_next_surf = self.y_surf_at_xcs[x_next]
ind_y_next = int(np.argmin(abs(np.array(yd_list[ind_x1 + 1]) - y_next_surf)))
nb_next = csum_y_blocks[ind_x1 + 1][ind_y_next - 1]
except IndexError:
x_next = None
y_next_surf = None
ind_y_next = None
else:
y1_above = yd_list[ind_x1][ind_y1 + 1]
x_next = None
y_next_surf = None
ind_y_next = None
while sgn != np.sign(diff_nb) and diff_nb != 0:
nb_below = self.y_blocks[x1_c][ind_y1 - 1]
if nb_below + np.sign(diff_nb) * -1 == 0:
break
new_dh_below = (y1_c - y1_below) / (nb_below + np.sign(diff_nb) * -1)
if not (min_dh < new_dh_below < max_dh):
break
nb1 += np.sign(diff_nb) * -1
if y1_c != self.y_surf_at_xcs[x1_c]:
nb_above = self.y_blocks[x1_c][ind_y1]
if nb_above + np.sign(diff_nb) * 1 == 0:
break
new_dh_above = (y1_above - y1_c) / (nb_above + np.sign(diff_nb) * 1)
if not (min_dh < new_dh_above < max_dh):
break
self.y_blocks[x1_c][ind_y1] += np.sign(diff_nb) * 1
else: # check slope of surface is appropriate
a = 1
# new_dh_next = (y_next_surf - y1_above) / (nb_next - (nb_above + np.sign(diff_nb) * 1))
# if not (min_dh < new_dh_above < max_dh):
# break
self.y_blocks[x1_c][ind_y1 - 1] += np.sign(diff_nb) * -1
diff_nb = nb1 - nb0
approx_grid_slope = (dh_dzone - diff_nb * self.dy_target) / (x1 - x0)
if sgn != np.sign(approx_grid_slope):
pass # this can be an issue if it cannot be adjusted
if sgn * approx_grid_slope > self.allowable_slope:
nn = 0
while sgn * approx_grid_slope > self.allowable_slope:
nn += 1
# if no issues then adjust blocks
self.y_blocks[x1_c][ind_y1 - 1] += sgn * 1
nb1 += sgn * 1
diff_nb = nb1 - nb0
if y1_c != self.y_surf_at_xcs[x1_c]:
self.y_blocks[x1_c][ind_y1] += sgn * -1
approx_grid_slope = (dh_dzone - diff_nb * self.dy_target) / (x1 - x0)
if nn > 10:
raise ValueError
diff_nb = nb1 - nb0
if diff_nb: # if zero then slope matches the line
# else check if an adjustment is possible
nnn = abs(diff_nb)
for nn in range(nnn):
diff_nb = nb1 - nb0
if diff_nb == 0:
break
nb_sgn = np.sign(diff_nb)
approx_new_slope = (dh_dzone - (diff_nb - nb_sgn * (nn + 1)) * self.dy_target) / (x1 - x0)
if sgn * approx_new_slope > self.allowable_slope:
break
nb_below = self.y_blocks[x1_c][ind_y1 - 1]
new_nb_below = nb_below + nb_sgn * -1
use_2_below = False
if new_nb_below == 0: # try bring from even lower layer
nb_2_below = self.y_blocks[x1_c][ind_y1 - 2]
new_nb_2_below = nb_2_below + nb_sgn * -1
y1_2_below = yd_list[ind_x1][ind_y1 - 2]
new_dh_2_below = (y1_below - y1_2_below) / new_nb_2_below
if min_dh < new_dh_2_below < max_dh:
use_2_below = True
else:
break
else:
new_dh_below = (y1_c - y1_below) / (nb_below + nb_sgn * -1)
if not (min_dh < new_dh_below < max_dh):
break
if y1_above is not None:
nb_above = self.y_blocks[x1_c][ind_y1]
new_dh_above = (y1_above - y1_c) / (nb_above + nb_sgn * 1)
if not (min_dh < new_dh_above < max_dh):
break
elif y_next_surf is not None:
if abs(nb_next - (nb1 + nb_sgn * -1)) < 2:
pass
else:
new_dh_on_next_surf = (y_next_surf - y1_c) / (nb_next - (nb1 + nb_sgn * -1))
if not (min_dh < new_dh_on_next_surf < max_dh):
break
# if no issues then adjust blocks
if use_2_below:
self.y_blocks[x1_c][ind_y1 - 2] += nb_sgn * -1
else:
self.y_blocks[x1_c][ind_y1 - 1] += nb_sgn * -1
nb1 += nb_sgn * -1
if y1_above is not None:
self.y_blocks[x1_c][ind_y1] += nb_sgn * 1
# Step 5: Set the total number of blocks to be equal to the column that uses the maximum number of
# blocks used to get to the surface
n_blocks = np.array([sum(self.y_blocks[xc]) for xc in xcs])
y_surfs = np.interp(xcs, self.x_surf, self.y_surf)
nbs_at_surf = []
surf_inds = []
for i in range(len(xcs)):
x0 = xcs[i]
nbs = np.cumsum(self.y_blocks[x0])
nbs = np.insert(nbs, 0, 0)
surf_inds.append(np.where(self.yd[x0] >= y_surfs[i] - 0.01)[0][0])
nbs_at_surf.append(nbs[np.where(self.yd[x0] >= y_surfs[i] - 0.01)][0])
# inds = np.where(np.interp(xcs, self.x_surf, self.y_surf) == h_max)[0]
i_max = np.argmax(nbs_at_surf) # maximum number of blocks at top
n_max = nbs_at_surf[i_max]
# create null nodes
for i in range(len(xcs)):
x0 = xcs[i]
if n_blocks[i] != n_max:
n_extra = n_max - n_blocks[i] # TODO: could improve this by minus eles more evenly from zones
if n_extra:
if surf_inds[i] == len(self.y_blocks[x0]):
self.y_blocks[x0].append(0)
self.yd[x0] = np.insert(self.yd[x0], len(self.yd[x0]), self.yd[x0][-1])
self.y_blocks[x0][-1] += n_extra
assert min(self.y_blocks[x0][:surf_inds[i]]) > 0, (x0, self.yd[x0], self.y_blocks[x0][-1])
def trim_grid_to_target_dh(self):
"""Check mesh for potential thin layers and try to remove rows of elements to get elements close to target dh"""
xcs = self.xcs_sorted
opt_low = self.dy_target * (self.min_scale + 1) / 2
opt_high = self.dy_target * (self.max_scale + 1) / 2
y_surfs_at_xcs = np.interp(xcs, self.x_surf, self.y_surf)
# try to trim mesh to be closer to target dh
# First try to remove blocks
opts_tried = []
for nn in range(10):
y_coords_at_xcs = [list(self.yd[xc]) for xc in xcs]
y_node_nums_at_xcs = [list(np.cumsum(self.y_blocks[xcs])) for xcs in self.y_blocks]
for i in range(len(y_node_nums_at_xcs)):
y_node_nums_at_xcs[i].insert(0, 0)
if y_node_nums_at_xcs[i][-2] == y_node_nums_at_xcs[i][-1]:
y_coords_at_xcs[i] = y_coords_at_xcs[i][:-1]
y_node_nums_at_xcs[i] = y_node_nums_at_xcs[i][:-1]
av_dhs = []
min_dhs = []
for i in range(len(y_node_nums_at_xcs)):
av_dhs.append([])
for j in range(len(y_node_nums_at_xcs[i]) - 1):
if (i, j) in opts_tried or y_coords_at_xcs[i][j + 1] > y_surfs_at_xcs[i]:
av_dhs[i].append(1000)
continue
nb = y_node_nums_at_xcs[i][j + 1] - y_node_nums_at_xcs[i][j]
av_dhs[i].append((y_coords_at_xcs[i][j + 1] - y_coords_at_xcs[i][j]) / nb)
min_dhs.append(min(av_dhs[i]))
if min(min_dhs) < self.dy_target: # favour slightly larger elements - could use opt_low
x_ind = min_dhs.index(min(min_dhs))
y_ind = av_dhs[x_ind].index(min_dhs[x_ind])
nb_lowest_p = y_node_nums_at_xcs[x_ind][y_ind] # range where element could be removed
nb_highest_p = y_node_nums_at_xcs[x_ind][y_ind + 1]
if nb_lowest_p >= nb_highest_p:
opts_tried.append((x_ind, y_ind))
continue
hzone_p = y_coords_at_xcs[x_ind][y_ind + 1] - y_coords_at_xcs[x_ind][y_ind]
found_opt = 0
max_new_dhs = []
for opt in range(nb_lowest_p, nb_highest_p):
max_new_dh = hzone_p / (nb_highest_p - nb_lowest_p - 1)
for w in range(len(y_node_nums_at_xcs)):
y_ind = interp_left(opt, y_node_nums_at_xcs[w])
if y_ind == len(y_node_nums_at_xcs[w]) - 1:
y_ind -= 1
nb_low = y_node_nums_at_xcs[w][y_ind]
nb_high = y_node_nums_at_xcs[w][y_ind + 1]
hzone = y_coords_at_xcs[w][y_ind + 1] - y_coords_at_xcs[w][y_ind]
new_dh = hzone / (nb_high - nb_low - 1)
if max_new_dh < new_dh:
max_new_dh = new_dh
max_new_dhs.append(max_new_dh)
max_new_dh = min(max_new_dhs)
yind = max_new_dhs.index(max_new_dh) + nb_lowest_p
if max_new_dh < opt_high:
for w in range(len(y_node_nums_at_xcs)):
y_ind = interp_left(yind, y_node_nums_at_xcs[w])
if y_ind == len(y_node_nums_at_xcs[w]) - 1:
y_ind -= 1
self.y_blocks[xcs[w]][y_ind] -= 1
found_opt = 1
if not found_opt:
opts_tried.append((x_ind, y_ind))
else:
break
# Then try to add blocks
opts_tried = []
for nn in range(20):
y_coords_at_xcs = [list(self.yd[xc]) for xc in xcs]
y_node_nums_at_xcs = [list(np.cumsum(self.y_blocks[xcs])) for xcs in self.y_blocks]
for i in range(len(y_node_nums_at_xcs)):
y_node_nums_at_xcs[i].insert(0, 0)
if y_node_nums_at_xcs[i][-2] == y_node_nums_at_xcs[i][-1]:
y_coords_at_xcs[i] = y_coords_at_xcs[i][:-1]
y_node_nums_at_xcs[i] = y_node_nums_at_xcs[i][:-1]
av_dhs = []
max_dhs = []
for i in range(len(y_node_nums_at_xcs)):
av_dhs.append([])
for j in range(len(y_node_nums_at_xcs[i]) - 1):
if (i, j) in opts_tried or y_coords_at_xcs[i][j + 1] > y_surfs_at_xcs[i]:
av_dhs[i].append(-1)
continue
nb = y_node_nums_at_xcs[i][j + 1] - y_node_nums_at_xcs[i][j]
av_dhs[i].append((y_coords_at_xcs[i][j + 1] - y_coords_at_xcs[i][j]) / nb)
max_dhs.append(max(av_dhs[i]))
if max(max_dhs) > opt_high:
x_ind = max_dhs.index(max(max_dhs))
y_ind = av_dhs[x_ind].index(max_dhs[x_ind])
nb_lowest = y_node_nums_at_xcs[x_ind][y_ind] # range where element could be added
nb_highest = y_node_nums_at_xcs[x_ind][y_ind + 1]
if nb_highest <= nb_lowest:
opts_tried.append((x_ind, y_ind))
continue
hzone_p = y_coords_at_xcs[x_ind][y_ind + 1] - y_coords_at_xcs[x_ind][y_ind]
found_opt = 0
min_new_dhs = []
for opt in range(nb_lowest, nb_highest):
min_new_dh = hzone_p / (nb_highest - nb_lowest + 1)
for w in range(len(y_node_nums_at_xcs)):
y_ind = interp_left(opt, y_node_nums_at_xcs[w])
nb_low = y_node_nums_at_xcs[w][y_ind]
nb_high = y_node_nums_at_xcs[w][y_ind + 1]
hzone = y_coords_at_xcs[w][y_ind + 1] - y_coords_at_xcs[w][y_ind]
new_dh = hzone / (nb_high - nb_low + 1)
if min_new_dh > new_dh:
min_new_dh = new_dh
min_new_dhs.append(min_new_dh)
min_new_dh = max(min_new_dhs)
yind = min_new_dhs.index(min_new_dh) + nb_lowest
if min_new_dh > opt_low:
for w in range(len(y_node_nums_at_xcs)):
y_ind0 = interp_left(yind, y_node_nums_at_xcs[w])
# y_ind1 = interp_left(nb_highest, y_node_nums_at_xcs[w])
self.y_blocks[xcs[w]][y_ind0] += 1
found_opt = 1
if not found_opt:
opts_tried.append((x_ind, y_ind))
else:
break
smallest = 0
for xcs in self.y_blocks:
if self.y_blocks[xcs][-1] < smallest:
smallest = self.y_blocks[xcs][-1]
if smallest != 0:
for xcs in self.y_blocks:
self.y_blocks[xcs][-1] += abs(smallest)
min_h = 1e6
max_h = 0
for xcs in self.y_blocks:
if max(self.y_blocks[xcs]) > max_h:
max_h = max(self.y_blocks[xcs])
if min(self.y_blocks[xcs]) < min_h:
min_h = min(self.y_blocks[xcs])
print('min_h: ', min_h)
print('max_h: ', max_h)
def build_req_y_node_positions(self):
"""
Creates lists of required positions and number of elements for each significant vertical line
Note: It also tries to make sure that steps in slopes are horizontal
"""
min_dh = self.min_scale * self.dy_target
max_dh = self.max_scale * self.dy_target
xcs = self.xcs_sorted
# Step 1: build lists containing required y element numbers and y-coords
req_y_coords_at_xcs = [list(self.yd[xc]) for xc in xcs]
y_node_nums_at_xcs = [list(np.cumsum(self.y_blocks[xcs])) for xcs in self.y_blocks]
for i in range(len(y_node_nums_at_xcs)):
y_node_nums_at_xcs[i].insert(0, 0)
if y_node_nums_at_xcs[i][-2] == y_node_nums_at_xcs[i][-1]:
req_y_coords_at_xcs[i] = req_y_coords_at_xcs[i][:-1]
y_node_nums_at_xcs[i] = y_node_nums_at_xcs[i][:-1]
# Step 2: For each slope that has a step, add additional requirement that slope does not decrease during step
# sds = self.sds
# for sd in sds:
# x0 = sd[0][0]
# x1 = sd[0][1]
# y0 = sd[1][0]
# y1 = sd[1][1]
# ind_x0 = int(np.argmin(abs(xcs - x0)))
# ind_x1 = int(np.argmin(abs(xcs - x1)))
# ind_y0 = int(np.argmin(abs(np.array(req_y_coords_at_xcs[ind_x0]) - y0)))
# ind_y1 = int(np.argmin(abs(np.array(req_y_coords_at_xcs[ind_x1]) - y1)))
# y0_c = req_y_coords_at_xcs[ind_x0][ind_y0]
# nb0 = y_node_nums_at_xcs[ind_x0][ind_y0]
# nb1 = y_node_nums_at_xcs[ind_x1][ind_y1]
# if nb0 != nb1:
# diff_nb = nb1 - nb0
# new_nb = y_node_nums_at_xcs[ind_x1][ind_y1] - diff_nb
# if new_nb not in y_node_nums_at_xcs[ind_x1]:
# dh_upper = (req_y_coords_at_xcs[ind_x1][ind_y1] - y0_c) / diff_nb
# if ind_y1 - 2 < 0:
# nb_lower = nb1 - diff_nb
# else:
# nb_lower = nb1 - y_node_nums_at_xcs[ind_x1][ind_y1 - 1] - diff_nb
# dh_lower = (y0_c - req_y_coords_at_xcs[ind_x1][ind_y1 - 1]) / nb_lower
# if min_dh < dh_upper < max_dh and min_dh < dh_lower < max_dh:
# y_node_nums_at_xcs[ind_x1].append(new_nb)
# y_node_nums_at_xcs[ind_x1].sort()
# req_y_coords_at_xcs[ind_x1].append(y0_c)
# req_y_coords_at_xcs[ind_x1].sort()
# Step 3: Build node number lists
req_y_nodes = []
for i, xc0 in enumerate(xcs):
req_y_nodes.append(list(np.array(y_node_nums_at_xcs[i]) + 1))
req_y_nodes[i][0] = 0
req_y_nodes[i] = np.array(req_y_nodes[i])
req_y_coords_at_xcs[i] = np.array(req_y_coords_at_xcs[i])
for j in range(len(req_y_coords_at_xcs[i]) - 1):
if req_y_coords_at_xcs[i][-1 - j] == req_y_coords_at_xcs[i][-2 - j]:
# print(req_y_nodes[i])
n_eles = req_y_nodes[i][-1 - j] - req_y_nodes[i][-2 - j]
dh = self.dy_target * n_eles
req_y_coords_at_xcs[i][-1 - j] += n_eles
# print(req_y_coords_at_xcs[i])
# raise ValueError
self.req_y_nodes = req_y_nodes
self.req_y_coords_at_xcs = req_y_coords_at_xcs
def build_y_coords_at_xcs(self):
"""Creates the y-coordinates for each node along each significant vertical line"""
xcs = self.xcs_sorted
req_y_nodes = self.req_y_nodes
y_coords_at_xcs = self.req_y_coords_at_xcs
# max_nbs = np.max(req_y_nodes)
nbs_at_surf = []
for i, xc0 in enumerate(xcs):
nbs_at_surf.append(req_y_nodes[i][np.where(y_coords_at_xcs[i] >= self.y_surf_at_xcs[xc0])][0])
# lower the y coordinates of unused to be inline with the right hand used blocks
for i, xc0 in enumerate(xcs[::-1]):
print(i, xc0, nbs_at_surf[-i], nbs_at_surf[-1 - i])
if i == 0:
continue
if nbs_at_surf[-1 - i] < nbs_at_surf[-i]: # if there are more blocks in one to right
diff_nbs = nbs_at_surf[-i] - nbs_at_surf[-1 - i]
min_h = self.y_surf_at_xcs[xc0] + diff_nbs * self.dy_target * 0.5
if nbs_at_surf[-i] in req_y_nodes[-1 - i]:
ind = np.argmin(abs(nbs_at_surf[-i] - req_y_nodes[-1 - i]))
y_coords_at_xcs[-1 - i][ind] = max([self.y_surf_at_xcs[xcs[-i]], min_h])
else:
ind = np.where(req_y_nodes[-1 - i] > nbs_at_surf[-i])[0][0]
req_y_nodes[-1 - i] = np.insert(req_y_nodes[-1 - i], ind, nbs_at_surf[-i])
y_coords_at_xcs[-1 - i] = np.insert(y_coords_at_xcs[-1 - i], ind, self.y_surf_at_xcs[xcs[-i]])
ind = np.where(req_y_nodes[-1 - i] >= nbs_at_surf[-1 - i])[0][0]
if req_y_nodes[-1 - i][ind] != req_y_nodes[-1 - i][-1]: # if there are blocks above the surface
y_coords_at_xcs[-1 - i][ind + 1:] = np.interp(req_y_nodes[-1 - i][ind + 1:], req_y_nodes[-i],
y_coords_at_xcs[-i])
# Step 4: position additional nodes to be consistent with previous column - otherwise equally spaced
y_nodes = []
for i, xc0 in enumerate(xcs):
if i == 0: # first column just interpolate
y_nodes.append(np.interp(np.arange(req_y_nodes[i][-1] + 1), req_y_nodes[i], y_coords_at_xcs[i]))
continue
new_y_vals = []
for j in range(len(y_nodes[i - 1])):
if j not in req_y_nodes[i]:
# if it exceeds surface of left column then interpolate the rest
if 1 == 0:
# if y_nodes[i - 1][j] >= self.y_surf_at_xcs[xcs[i - 1]]:
pass
# if y_nodes[i-1][j] >= self.y_surf_at_xcs[xcs[i-1]]:
# node_nums = [x for x in req_y_nodes[i]]
# y_poses = [x for x in y_coords_at_xcs[i]]
# for nn in range(len(new_y_vals)):
# if nn not in node_nums:
# node_nums.append(nn)
# y_poses.append(new_y_vals[nn])
#
# node_nums.sort()
# y_poses.sort()
# yys = np.interp(np.arange(j, req_y_nodes[i][-1] + 1), node_nums, y_poses)
# new_y_vals += list(yys)
# break
else:
# get next and previous req points and check the slope of each of the those back to left col
ind_below = interp_left(j, req_y_nodes[i])
req_n_below = req_y_nodes[i][ind_below]
req_n_above = req_y_nodes[i][ind_below + 1]
# sf is slope below plus slope above times j / (ind_above - ind_below)
dh_dzone_below = y_coords_at_xcs[i][ind_below] - y_nodes[i - 1][req_n_below]
dh_dzone_above = y_coords_at_xcs[i][ind_below + 1] - y_nodes[i - 1][req_n_above]
dh = dh_dzone_below + (dh_dzone_above - dh_dzone_below) / (req_n_above - req_n_below) * (
j - req_n_below)
new_y_vals.append(y_nodes[i - 1][j] + dh)
else:
ind = np.where(req_y_nodes[i] == j)[0][0]
new_y_vals.append(y_coords_at_xcs[i][ind])
new_y_vals = np.array(new_y_vals)
# adjust positions to ensure element thickness is appropriate
for j in range(len(req_y_nodes[i]) - 1):
ys = new_y_vals[req_y_nodes[i][j]:req_y_nodes[i][j + 1] + 1]
diffs = np.diff(ys)
if len(diffs):
min_h = min(diffs)
max_h = max(diffs)
# h_block = ys[0] - ys[-1]
nbs = req_y_nodes[i][j + 1] - req_y_nodes[i][j]
uni_ys = np.interp(np.arange(req_y_nodes[i][j], req_y_nodes[i][j + 1] + 1), req_y_nodes[i],
y_coords_at_xcs[i])
uni_h = min(np.diff(uni_ys))
if min_h / max_h < 0.7:
x = 0.7 - min_h / max_h
new_ys = (1 - x) * ys + x * uni_ys
new_y_vals[req_y_nodes[i][j]:req_y_nodes[i][j + 1] + 1] = new_ys
if nbs_at_surf[i] == req_y_nodes[i][j] and min_h < self.dy_target:
h0 = new_y_vals[req_y_nodes[i][j]]
hs = h0 + np.arange(0, nbs + 1) * self.dy_target
new_y_vals[req_y_nodes[i][j]:req_y_nodes[i][j + 1] + 1] = hs
y_nodes.append(new_y_vals)
y_nodes = np.array(y_nodes)
# For each surface slope adjust steps so that they are not pointed against slope
for i, xc0 in enumerate(xcs):
if i == len(xcs) - 1:
break
surf_at_xc = self.y_surf_at_xcs[xc0]
ind_yc = np.argmin(abs(y_nodes[i] - surf_at_xc))
if ind_yc == len(y_nodes[i]) - 1:
continue
surf_at_next_xc = self.y_surf_at_xcs[xcs[i + 1]]
ind_nc = np.argmin(abs(y_nodes[i + 1] - surf_at_next_xc))
if ind_nc == ind_yc:
continue
diff_nb = ind_nc - ind_yc
# assert diff_nb > 0 # currently only supports smoothing forward
next_slope = surf_at_next_xc - surf_at_xc
# trim either half the block or min_dh
if next_slope > 0 and diff_nb > 0:
ind_yc2 = np.where(y_nodes[i + 1] > surf_at_xc)[0][0]
ind_yc2 = max(ind_yc2, ind_yc + 1)
curr_col_ys = list(y_nodes[i][ind_yc2 - 1: ind_nc + 1])
next_ys = list(y_nodes[i + 1][ind_yc2 - 1: ind_nc + 1])
# y_nodes[i][ind_yc: ind_nc] = (next_ys - next_ys[0]) * 0.5 + next_ys[0]
av_dh = next_slope / diff_nb
update_unused = 0
for kk in range(1, len(next_ys)):
a = curr_col_ys[kk] - next_ys[kk]
new_dh = next_ys[kk] - curr_col_ys[kk - 1]
if new_dh < self.dy_target * 0.5:
next_ys[kk] = curr_col_ys[kk - 1] + self.dy_target * 0.5
if (curr_col_ys[kk] - next_ys[kk]) / av_dh > 0.2:
update_unused = 1
if update_unused:
y_nodes[i][ind_yc2: ind_nc + 1] = next_ys[1:]
# elif next_slope < 0 and diff_nb < 0:
# next_ys = y_nodes[i+1][ind_yc+1: ind_nc + 1]
# # y_nodes[i][ind_yc: ind_nc] = (next_ys - next_ys[0]) * 0.5 + next_ys[0]
# y_nodes[i][ind_yc+1: ind_nc + 1] = next_ys
self.y_coords_at_xcs = y_nodes[:, ::-1] # invert y-values
def build_y_coords_grid_via_propagation(self):
"""Interpolates the position of all nodes based on the y-coordinates along the significant lines"""
if self.y_coords_at_xcs is None:
self.build_y_coords_at_xcs()
if self.x_nodes is None:
self.set_x_nodes()
self.y_nodes = interp2d(self.x_nodes, np.array(self.xcs_sorted), self.y_coords_at_xcs)
def build_y_coords_grid_via_3d_interp(self):
"""Interpolates the position of all nodes based on the coordinates of the significant positions"""
if self.x_nodes is None:
self.set_x_nodes()
y_node_nums = np.arange(0, self.req_y_nodes[0][-1] + 1)
ys = []
for i in range(len(self.x_nodes)):
ys.append(
interp3d(self.x_nodes[i], y_node_nums, self.xcs_sorted, self.req_y_nodes, self.req_y_coords_at_xcs))
self.y_nodes = np.array(ys)
def set_x_nodes(self):
"""Determine optimal position of node x-coordinates"""
dxs = [0]
x_start = 0
x_scale_curr = self.x_scale_vals[0]
for xx in range(1, len(self.xcs_sorted)):
x_shift = self.xcs_sorted[xx] - self.xcs_sorted[xx - 1]
extra_inds = np.where(
(self.xcs_sorted[xx] > self.x_scale_pos) & (self.x_scale_pos > self.xcs_sorted[xx - 1]))
x_cps = [self.xcs_sorted[xx - 1]]
x_scales = [x_scale_curr]
for i in extra_inds[0]:
x_cps.append(self.x_scale_pos[i])
x_scales.append(self.x_scale_vals[i])
x_scales = np.array(x_scales)
x_cps.append(self.xcs_sorted[xx])
zone_widths = np.diff(x_cps)
n_eles = | np.sum(zone_widths / (x_scales * self.dy_target)) | numpy.sum |
"""
BRL util functions
"""
import numpy as np
from scipy.stats import norm
import pdb
COLORS = ['g','k','r','b','c','m','y','burlywood','chartreuse','0.8','0.6', '0.4', '0.2']
MARKER = ['-','x-', '-.','+-','*-','d-','o-','x-','s-','2-','3-']
T_chain = 5000
T_loop = 5000
T_grid5 = 10000
T_grid10 = 20000
T_minimaze = 30000
T_maze = 40000
T_movingmaze = 200
DTYPE = np.float64
EVAL_RUNS = 10
EVAL_NUM = 100
EVAL_STEPS = 50
EVAL_EPS = 0.0
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight = False):
attr = []
num = color2num[color]
if highlight: num += 10
    attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
def discrete_phi(state, action, dim, anum):
"""Linear function approximation for tabular case
"""
    phi = np.zeros(dim, dtype=np.float64)
phi[state*anum+action] = 1.0
return phi
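# Illustrative usage (not part of the original module): for a tabular problem with
# 5 states and 2 actions, dim = 5*2 = 10 and the feature vector is one-hot at index
# state*anum + action, e.g.
#   discrete_phi(state=3, action=1, dim=10, anum=2)   # 1.0 at index 7, zeros elsewhere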
def img_preprocess(org_img):
    """Atari games image preprocessing similar to DQN paper
    """
    import cv2  # cv2 is not imported at module level; imported lazily like matplotlib elsewhere in this module
    imgGray = cv2.cvtColor(org_img, cv2.COLOR_RGB2GRAY)
resizedImg = cv2.resize(np.reshape(imgGray, org_img.shape[:-1]), (84, 110))
cropped = resizedImg[18:102,:]
cropped = cropped.astype(np.float32)
cropped *= (1.0/255.0)
return cropped
def rbf(state, action, dim, const=1.0):
"""Radial Basis Function used in KTD paper (https://www.jair.org/index.php/jair/article/view/10675/25513)
"""
n = dim
c1 = np.reshape(np.array([-np.pi/4.0, 0.0, np.pi/4.0]),(3,1)) # For inverted pendulum
c2 = np.reshape(np.array([-1.0,0.0,1.0]), (1,3)) # For inverted pendulum
basis = np.exp(-0.5*(c1-state[0])**2)*np.exp(-0.5*(c2-state[1])**2)
basis = np.append(basis.flatten(), const)
phi = np.zeros(3*n, dtype=np.float32)
phi[action*n:(action+1)*n] = basis
return phi
def plot_IQR(T, data, labels, x_label='x', y_label='y', x_vals = None, title=None, save=False, legend=(True, 'upper right'),
shadow=True, pic_name = None, colors=None, smoothed=False):
import matplotlib.pyplot as plt
"""Plot with interquartile
T : True finite-time horizon
data : data to plot
labels : labels to display in legend
x_label : x-axis label
y_label : y-axis label
x_vals : x values. If not given (None), it is determined with T.
title : title name to plot
save : True to save the plot rather than display
shadow : fill between 25% and 75%
legend : Tuple with - legend[0] is True if you want to display a legend. legend[1] = a tuple for anchor location.
pic_name : a name of an image file of the plot
"""
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plot_err = []
f, ax = plt.subplots()
N = data.shape[-1]
if len(data.shape) == 2:
data = data[np.newaxis,:]
    if x_vals is None:
        x_vals = range(0, T, int(T/N))
    if N != len(x_vals):
        x_vals = x_vals[:(N-len(x_vals))]
for (i,y) in enumerate(data):
if smoothed:
tmp_y = []
for yi in y:
tmp_y.append(smoothing(yi,4))
y = np.array(tmp_y)
m, ids25, ids75 = iqr(y)
tmp, = ax.plot(x_vals, m, MARKER[i+2], color=colors[i], markeredgecolor = colors[i], markerfacecolor='None', label=labels[i], linewidth=2.0)
plot_err.append(tmp)
if shadow:
ax.fill_between(x_vals, list(ids75), list(ids25), facecolor=colors[i], alpha=0.3)
if legend[0]:
ax.legend(plot_err, labels ,loc=legend[1], shadow=False, fancybox=True, framealpha=0.5,
prop={'family':'Times New Roman', 'size':16})#, bbox_to_anchor=legend[1])
ax.tick_params(axis='both',which='major',labelsize=11)
ax.set_xlabel(x_label,fontsize=14, fontname="Times New Roman")
ax.set_ylabel(y_label,fontsize=14, fontname="Times New Roman")
ax.grid()
if title:
ax.set_title(title,fontsize=15,fontname="Times New Roman")
if save:
f.savefig(pic_name, bbox_inches='tight', pad_inches=0)
else:
plt.show()
return f, ax
def plot_sd(T, data, labels, x_label='x', y_label='y', x_vals = None, title=None, save=False, legend=(True, 'upper right', (1.0, 1.0)),
shadow=True, pic_name = None, colors=None, smoothed=False, figure=None):
import matplotlib.pyplot as plt
"""Plot with interquartile
T : True finite-time horizon
data : data to plot
labels : labels to display in legend
x_label : x-axis label
y_label : y-axis label
x_vals : x values. If not given (None), it is determined with T.
title : title name to plot
save : True to save the plot rather than display
shadow : fill between 25% and 75%
legend : Tuple with - legend[0] is True if you want to display a legend. legend[1] = a tuple for anchor location.
pic_name : a name of an image file of the plot
"""
if colors is None:
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
#if not(colors):
# colors = ['g','k','c','b','m','r','y','burlywood','chartreuse','0.8','--', '-.', ':']
if figure:
f, ax, plegend = figure
else:
f, ax = plt.subplots()
plegend = []
N = data.shape[-1]
if len(data.shape) == 2:
data = data[np.newaxis,:]
if x_vals is None:
x_vals = range(0,T, int(T/N))
if N != len(x_vals):
x_vals = x_vals[:(N-len(x_vals))]
for (i,y) in enumerate(data):
if smoothed:
tmp_y = []
for yi in y:
tmp_y.append(smoothing(yi,4))
y = | np.array(tmp_y) | numpy.array |
from mri_modules.utils import *
import os
import numpy as np
import cv2
import shutil
from skimage.measure import marching_cubes_lewiner as marching_cubes
import stl
from stl import mesh
import tensorflow as tf
from tensorflow.keras.models import load_model
import skimage.transform
import nibabel as nib
import h5py
import scipy
from mri_modules.load_in_arrays import *
import time
import random
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import nilearn
import SimpleITK as sitk
import imregpoc
from math import pi
import sys
from skimage.morphology import convex_hull_image
start_time = time.time()
def binarize(array, min_):
binary = array.copy()
binary[array < min_] = 0
binary[array >= min_] = 1
return binary
def dilate_up(array, size, stacked = True):
if stacked:
binary = np.squeeze(array.copy()[0], axis = 3)
else:
binary = array.copy()
binary[binary > 0] = 1
kernel = scipy.ndimage.generate_binary_structure(3, 1)
blew_up = scipy.ndimage.binary_dilation(binary.astype('uint8'), kernel, iterations=size)
if stacked:
return np.stack([np.stack([blew_up], axis = 3)])
else:
return blew_up
def translate_3d(array, translation):
original_array = array.copy()
array_translated = array.copy()
array_translated[:] = 0
for z,Slice in enumerate(original_array):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
array_translated[z+translation[0]][y+translation[1]][x+translation[2]] = pixel
except:
pass
return array_translated
def touching_island(reference, array, stacked = True):
if stacked:
array = np.squeeze(array.copy()[0], axis = 3)
array[array > 0] = 1
reference = np.squeeze(reference.copy()[0], axis = 3)
reference[reference > 0] = 1
else:
array[array > 0] = 1
reference[reference > 0] = 1
masked = array.copy()
masked[:] = 0
touching_structure_3d =[[[0,0,0],
[0,1,0],
[0,0,0]],
[[0,1,0],
[1,1,1],
[0,1,0]],
[[0,0,0],
[0,1,0],
[0,0,0]]]
markers, num_features = scipy.ndimage.measurements.label(array, touching_structure_3d)
reference_idx = np.unique(markers[reference == 1])
for idx in reference_idx:
masked[markers == idx] = 1
masked[array == 0] = 0
if stacked:
return np.stack([np.stack([masked], axis = 3)])
else:
return masked
def biggest_island(input_array, stacked = True):
if stacked:
masked = np.squeeze(input_array.copy()[0], axis = 3)
binary = np.squeeze(input_array.copy()[0], axis = 3)
binary[:] = 0
binary[np.squeeze(input_array[0], axis = 3) > 0] = 1
else:
masked = input_array.copy()
binary = input_array.copy()
binary[:] = 0
binary[input_array > 0] = 1
touching_structure_3d =[[[0,0,0],
[0,1,0],
[0,0,0]],
[[0,1,0],
[1,1,1],
[0,1,0]],
[[0,0,0],
[0,1,0],
[0,0,0]]]
markers,_ = scipy.ndimage.measurements.label(binary,touching_structure_3d)
markers[binary == 0] = 0
counts = np.bincount(markers.ravel())
counts[0] = 0
noise_idx = np.where(counts != np.max(counts))
noise = np.isin(markers, noise_idx)
binary[noise] = 0
masked[binary == 0] = 0
if stacked:
return np.stack([np.stack([masked], axis = 3)])
else:
return masked
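# Illustrative note (not part of the original script): biggest_island labels the
# 6-connected components of the binarised volume with scipy.ndimage.measurements.label,
# counts voxels per label with np.bincount, and zeroes every component except the
# largest, e.g. a 100-voxel blob survives while a 5-voxel speck is removed:
#   cleaned = biggest_island(volume, stacked=False)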
def combine_zeros(arrays):
combined = arrays[0].copy()
for array in arrays:
combined[array < 0.1] = 0
return combined
def adaptive_threshold(array, course, precise, blur_precision = 0, stacked = True):
if stacked:
thresholded_array = np.squeeze(array.copy()[0], axis = 3)
thresholded_array = thresholded_array*255
thresholded_array[thresholded_array > 255] = 255
else:
thresholded_array = array.copy()
thresholded_array = thresholded_array*255
thresholded_array[thresholded_array > 255] = 255
blurred = scipy.ndimage.gaussian_filter(thresholded_array, blur_precision)
adap = []
for image in blurred:
thresh = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, course, 2)
thresh2 = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, precise, 2)
thresh3 = thresh.copy()
thresh3[:] = 255
thresh3[thresh2 == 0] = 0
thresh3[thresh == 0] = 0
adap.append(thresh3)
adap = np.stack(adap)
thresholded_array[adap == 0] = 0
if stacked:
return np.stack([np.stack([thresholded_array/255], axis = 3)])
else:
return thresholded_array/255
def generate_stl(array_3d, stl_file_path, stl_resolution):
array = array_3d.copy()
verts, faces, norm, val = marching_cubes(array, 0.01, step_size = stl_resolution, allow_degenerate=True)
mesh = stl.mesh.Mesh(np.zeros(faces.shape[0], dtype=stl.mesh.Mesh.dtype))
for i, f in enumerate(faces):
for j in range(3):
mesh.vectors[i][j] = verts[f[j],:]
if not stl_file_path.endswith(".stl"):
stl_file_path += ".stl"
if not os.path.exists(os.path.dirname(stl_file_path)):
os.makedirs(os.path.dirname(stl_file_path))
mesh.save(stl_file_path)
def find_median_grayscale(array):
zero_pixels = float(np.count_nonzero(array==0))
single_dimensional = array.flatten().tolist()
single_dimensional.extend(np.full((1, int(zero_pixels)), 1000).flatten().tolist())
return np.median(single_dimensional)
def locate_bounds(array, stacked = True):
if stacked:
left = np.squeeze(array.copy()[0], axis = 3).shape[2]
right = 0
low = np.squeeze(array.copy()[0], axis = 3).shape[1]
high = 0
shallow = np.squeeze(array.copy()[0], axis = 3).shape[0]
deep = 0
array_3d = np.squeeze(array.copy()[0], axis = 3)
else:
left = array.copy().shape[2]
right = 0
low = array.copy().shape[1]
high = 0
shallow = array.copy().shape[0]
deep = 0
array_3d = array.copy()
for z,Slice in enumerate(array_3d):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
if pixel > 0:
if z > deep:
deep = z
if z < shallow:
shallow = z
if y > high:
high = y
if y < low:
low = y
if x > right:
right = x
if x < left:
left = x
return [left,right,low,high,shallow,deep]
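# Illustrative note (not part of the original script): the triple loop above is
# equivalent to a vectorised bounding-box computation, shown here only as a sketch:
#   nz = np.argwhere(array_3d > 0)        # (N, 3) indices of non-zero voxels
#   shallow, low, left = nz.min(axis=0)
#   deep, high, right = nz.max(axis=0)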
def pad(array):
padded = []
for image in array:
padded.append(image)
padded.append(np.zeros((array.shape[1],array.shape[2])))
padded.append(np.zeros((array.shape[1],array.shape[2])))
final = translate_3d(np.stack(padded), [1,1,1])
return final
def write_images(array, test_folder_path):
if not os.path.exists(test_folder_path):
os.makedirs(test_folder_path)
for n,image in enumerate(array):
file_name = str(str(n) +'.png')
cv2.imwrite(os.path.join(test_folder_path, file_name), image*255)
def circle_highlighted(reference, binary, color):
circled = reference.copy()
binary = binary.copy()
binary[binary > 0] = 1
for n, image in enumerate(binary):
contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(circled[n], contours, -1,color, 1)
return circled
def preprocess_data(path,rescale_factor = None,translation = None):
print(path)
image_data, valid = load_dicom_folder(path, updating_labels = False)
print(image_data.shape)
print(valid)
print(np.max(image_data))
print("oofoofoofoofofofofofof")
'''if "t2" in path.lower():
image_data = np.rot90(image_data, axes = (2,1)).T'''
image_data = image_data/np.max(image_data)
blank_unscaled_array = image_data.copy()
blank_unscaled_array[:] = 0
z_zoom = image_size/image_data.shape[0]
y_zoom = image_size/image_data.shape[1]
x_zoom = image_size/image_data.shape[2]
print(z_zoom, y_zoom, x_zoom)
rescaled_blank = skimage.transform.rescale(blank_unscaled_array, (z_zoom, y_zoom, x_zoom))
image_data = np.stack([np.stack([image_data], axis = 3)])
if rescale_factor is None:
bounds_finder = image_data.copy()
bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1)
bounds_finder = biggest_island(bounds_finder)
bounds = locate_bounds(bounds_finder)
[left,right,low,high,shallow,deep] = bounds
x_size = abs(left-right)
y_size = abs(low-high)
z_size = abs(shallow-deep)
max_size = np.max([x_size, y_size, z_size])
rescale_factor = (image_size*0.8)/max_size
backscale_factor = 1/rescale_factor
image_data = skimage.transform.rescale(np.squeeze(image_data.copy()[0], axis = 3), (rescale_factor, rescale_factor, rescale_factor))
if translation is None:
bounds_finder = image_data.copy()
bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1, stacked = False)
bounds_finder = biggest_island(bounds_finder, stacked = False)
bounds = locate_bounds(np.stack([np.stack([bounds_finder], axis = 3)]))
else:
bounds=translation
print("\n\nbounds:",bounds,"\n\n")
[left,right,low,high,shallow,deep] = bounds
image_data = translate_3d(image_data, [-shallow,-low,-left])
rescaled_array = rescaled_blank.copy()
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
rescaled_array[z][y][x] = pixel
except:
pass
return rescaled_array, rescale_factor,bounds
def ConvNetsemantic(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block(p0, 8, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 16, 0.1) #64 -> 32
c3, p3 = down_block(p2, 32, 0.2) #32 -> 16
c4, p4 = down_block(p3, 64, 0.3) #16->8
c5, p5 = down_block(p4, 128, 0.3) #16->8
c6, p6 = down_block(p5, 256, 0.3) #16->8
c7, p7 = down_block(p6, 512, 0.3) #16->8
bn = bottleneck(p7, 1024, 0.4)
print(bn.shape)
u1 = up_block(bn, c7, 512, 0.3) #8 -> 16
u2 = up_block(u1, c6, 256, 0.2) #16 -> 32
u3 = up_block(u2, c5, 128, 0.1) #32 -> 64
u4 = up_block(u3, c4, 64, 0.1) #64 -> 128
u5 = up_block(u4, c3, 32, 0.1) #64 -> 128
u6 = up_block(u5, c2, 16, 0.1) #64 -> 128
u7 = up_block(u6, c1, 8, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(4, (1, 1, 1),padding='same', activation="softmax")(u7)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
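# SimpleITK registration of moving_image onto fixed_image. Without a supplied transform it runs
# nine gradient-descent registrations (Mattes mutual information, random sampling), keeps the run
# with the lowest metric value and resamples `orig` with that transform; otherwise it just applies
# the given transform. Returns (resampled array, transform).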
def register(fixed_image, moving_image, orig, transform = None):
if transform is None:
resamples = []
metrics = []
transforms = []
for i in range (1,10):
ImageSamplingPercentage = 1
initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.ScaleVersor3DTransform(), sitk.CenteredTransformInitializerFilter.MOMENTS)
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=200)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(float(ImageSamplingPercentage)/100)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=0.001, numberOfIterations=10**5, convergenceMinimumValue=1e-6, convergenceWindowSize=100) #Once
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform)
#registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
#registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
#registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
transform = registration_method.Execute(fixed_image, moving_image)
#print(transform)
print("number:",i)
print(registration_method.GetMetricValue())
metrics.append(registration_method.GetMetricValue())
resamples.append(sitk.Resample(orig, fixed_image, transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID()))
transforms.append(transform)
print(np.min(metrics))
return sitk.GetArrayFromImage(resamples[metrics.index(np.min(metrics))]),transforms[metrics.index(np.min(metrics))]
else:
return sitk.GetArrayFromImage(sitk.Resample(orig, fixed_image, transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())),transform
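# Slice-by-slice adaptive threshold: the volume is scaled to 0-255, optionally Gaussian blurred,
# and each slice is kept only where both a coarse and a fine cv2.adaptiveThreshold agree.
# Returns the masked volume rescaled back to 0-1.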
def adaptive_threshold(array, course, precise, blur_precision = 0, stacked = True):
    if stacked:
        thresholded_array = np.squeeze(array.copy()[0], axis=3)
    else:
        thresholded_array = array.copy()
    thresholded_array = thresholded_array*255
    thresholded_array[thresholded_array > 255] = 255
blurred = scipy.ndimage.gaussian_filter(thresholded_array, blur_precision)
adap = []
for image in blurred:
thresh = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, course, 2)
thresh2 = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, precise, 2)
thresh3 = thresh.copy()
thresh3[:] = 255
thresh3[thresh2 == 0] = 0
thresh3[thresh == 0] = 0
adap.append(thresh3)
adap = np.stack(adap)
thresholded_array[adap == 0] = 0
if stacked:
return np.stack([np.stack([thresholded_array/255], axis = 3)])
else:
return thresholded_array/255
def convex_border(array, thickness):
contour_only = array.copy()
binary = array.copy()
contour_only[:] = 0
binary[:] = 0
binary[array > 0] = 255
cont = []
for n, image in enumerate(binary):
contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
hull = cv2.convexHull(contour)
cv2.drawContours(contour_only[n], [hull], -1, 200, thickness)
return contour_only
def convex_hull(array):
contour_only = array.copy()
binary = array.copy()
hull = []
binary[:] = 0
binary[array > 0.05] = 255
cont = []
for n, image in enumerate(binary):
convex = np.array(convex_hull_image(image.astype('uint8')),dtype="float64")
hull.append(convex)
return np.stack(hull)
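# Fills small enclosed holes in a binary volume: per slice, background components that are larger
# than `sense` voxels or connected to the outer background are kept as background, and the
# remaining small background islands are merged into the foreground.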
def fill_holes_binary(array, sense):
binary = array.copy()
binary_original = array.copy()
binary_original[:] = 0
binary_original[array > 0] = 1
binary[:] = 0
binary[array == 0] = 1
touching_structure_2d = [[0,1,0],
[1,1,1],
[0,1,0]]
denoised = []
for n,image in enumerate(binary):
markers, num_features = scipy.ndimage.measurements.label(image,touching_structure_2d)
omit = markers[0][0]
flat = markers.ravel()
binc = np.bincount(flat)
binc_not = np.bincount(flat[flat == omit])
noise_idx2 = np.where(binc > sense)
noise_idx1 = np.where(binc == np.max(binc_not))
mask1 = np.isin(markers, noise_idx1)
mask2 = np.isin(markers, noise_idx2)
image[mask1] = 0
image[mask2] = 0
denoised.append(image)
denoised = np.stack(denoised)
binary_original[denoised == 1] = 1
return binary_original
def convex_shape(input_array):
#binary = adaptive_threshold(input_array, 101, 45, 1, stacked = False)
#binary[input_array < 0.1] = 0
binary = np.array(input_array > 0.1,dtype = "float64")
binary = biggest_island(binary, stacked = False)
binary = convex_hull(binary)
binary = biggest_island(binary, stacked = False)
return binary
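# Keeps the slices in which flair, t1 and t2 all contain signal; t1/t2 slices that fail the check
# stay zeroed.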
def trim(flair,t1,t2):
flair_cut = flair.copy()
t1_cut = t1.copy()
t1_cut[:] = 0
t2_cut = t2.copy()
t2_cut[:] = 0
for n,image in enumerate(flair):
        if np.max(flair[n]) > 0 and np.max(t1[n]) > 0 and np.max(t2[n]) > 0:  # check each slice rather than the whole volume
flair_cut[n] = flair[n]
t1_cut[n] = t1[n]
t2_cut[n] = t2[n]
return flair_cut, t1_cut, t2_cut
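# Derives a bounding box and scale factor from the flair volume, then applies the same translation
# and rescaling to flair, t1 and t2 so all three modalities share one image_size^3 grid.
# Returns (flair, t1ce, t2).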
def normalize(flair,t1,t2):
flair = flair/np.max(flair)
blank_unscaled_array = flair.copy()
blank_unscaled_array[:] = 0
z_zoom = image_size/flair.shape[0]
y_zoom = image_size/flair.shape[1]
x_zoom = image_size/flair.shape[2]
image_data1 = skimage.transform.rescale(flair, (z_zoom, y_zoom, x_zoom))
original_array1 = image_data1.copy()
original_array1[:] = 0
image_data = np.stack([np.stack([flair], axis = 3)])
original_unscaled_array = image_data.copy()
bounds = locate_bounds(image_data)
[left,right,low,high,shallow,deep] = bounds
x_size = abs(left-right)
y_size = abs(low-high)
z_size = abs(shallow-deep)
max_size = np.max([x_size, y_size, z_size])
image_data = translate_3d(np.squeeze(image_data.copy()[0], axis = 3), [-shallow,-low,-left])
rescale_factor = (image_size*0.8)/max_size
print("rescale factor:", rescale_factor)
backscale_factor = 1/rescale_factor
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_scaled_down = image_data.copy()
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
flair = original_array1.copy()
########################### T1CE ####################################
t1 = t1/np.max(t1)
image_data = translate_3d(t1, [-shallow,-low,-left])
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_array1[:] = 0
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
t1ce = original_array1.copy()
########################### T2 ####################################
t2 = t2/np.max(t2)
image_data = translate_3d(t2, [-shallow,-low,-left])
image_data = skimage.transform.rescale(image_data, (rescale_factor, rescale_factor, rescale_factor))
original_array1[:] = 0
for z,Slice in enumerate(image_data):
for y,line in enumerate(Slice):
for x,pixel in enumerate(line):
try:
original_array1[z][y][x] = pixel
except:
pass
t2 = original_array1.copy()
return flair,t1ce,t2
def ConvNetTumor(x,y,z):
inputs = keras.layers.Input((x,y,z, 1))
p0 = inputs
c1, p1 = down_block(p0, 16, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 32, 0.1) #64 -> 32
c3, p3 = down_block(p2, 64, 0.2) #32 -> 16
c4, p4 = down_block(p3, 128, 0.3) #16->8
bn = bottleneck(p4, 256, 0.4)
print(bn.shape)
u1 = up_block(bn, c4, 128, 0.3) #8 -> 16
u2 = up_block(u1, c3, 64, 0.2) #16 -> 32
u3 = up_block(u2, c2, 32, 0.1) #32 -> 64
u4 = up_block(u3, c1, 16, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u4)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def ConvNetbinary(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block(p0, 4, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 8, 0.1) #64 -> 32
c3, p3 = down_block(p2, 16, 0.2) #32 -> 16
c4, p4 = down_block(p3, 32, 0.3) #16->8
c5, p5 = down_block(p4, 64, 0.3) #16->8
c6, p6 = down_block(p5, 128, 0.3) #16->8
c7, p7 = down_block(p6, 256, 0.3) #16->8
bn = bottleneck(p7, 512, 0.4)
print(bn.shape)
u1 = up_block(bn, c7, 256, 0.3) #8 -> 16
u2 = up_block(u1, c6, 128, 0.2) #16 -> 32
u3 = up_block(u2, c5, 64, 0.1) #32 -> 64
u4 = up_block(u3, c4, 32, 0.1) #64 -> 128
u5 = up_block(u4, c3, 16, 0.1) #64 -> 128
u6 = up_block(u5, c2, 8, 0.1) #64 -> 128
u7 = up_block(u6, c1, 4, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u7)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
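# 3D U-Net building blocks. The *_e variants below use ELU activations; the plain versions further
# down use ReLU. A down block is two 3D convolutions plus dropout followed by 2x max pooling, an up
# block upsamples, concatenates the encoder skip connection and applies two convolutions, and the
# bottleneck is the two-convolution block at the bottom of the encoder.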
def down_block_e(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
print(x.shape)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
p = keras.layers.MaxPool3D(pool_size = (2, 2, 2))(c)
return c, p
def up_block_e(x, skip, filters, dropout,kernel_size=(3, 3, 3), padding="same", strides=1):
us = keras.layers.UpSampling3D((2, 2, 2))(x)
concat = keras.layers.Concatenate()([us, skip])
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = concat.shape[1:], kernel_initializer='he_normal')(concat)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def bottleneck_e(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout) (c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="elu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def ConvNetSemantic64(x,y,z):
inputs = keras.layers.Input((x,y,z, 3))
p0 = inputs
c1, p1 = down_block_e(p0, 16, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block_e(p1, 32, 0.1) #64 -> 32
c3, p3 = down_block_e(p2, 64, 0.2) #32 -> 16
c4, p4 = down_block_e(p3, 128, 0.3) #16->8
c5, p5 = down_block_e(p4, 256, 0.3) #16->8
c6, p6 = down_block_e(p5, 512, 0.3) #16->8
bn = bottleneck_e(p6, 1024, 0.4)
print(bn.shape)
u1 = up_block_e(bn, c6, 512, 0.3) #8 -> 16
u2 = up_block_e(u1, c5, 256, 0.2) #16 -> 32
u3 = up_block_e(u2, c4, 128, 0.1) #32 -> 64
u4 = up_block_e(u3, c3, 64, 0.1) #64 -> 128
u5 = up_block_e(u4, c2, 32, 0.1) #64 -> 128
u6 = up_block_e(u5, c1, 16, 0.1) #64 -> 128
outputs = tf.keras.layers.Conv3D(4, (1, 1, 1),padding='same', activation="softmax")(u6)
#outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="relu")(u4)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
def down_block(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
print(x.shape)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
p = keras.layers.MaxPool3D(pool_size = (2, 2, 2))(c)
return c, p
def up_block(x, skip, filters, dropout,kernel_size=(3, 3, 3), padding="same", strides=1):
us = keras.layers.UpSampling3D((2, 2, 2))(x)
concat = keras.layers.Concatenate()([us, skip])
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = concat.shape[1:], kernel_initializer='he_normal')(concat)
c = keras.layers.Dropout(dropout)(c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def bottleneck(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = x.shape[1:], kernel_initializer='he_normal')(x)
c = keras.layers.Dropout(dropout) (c)
c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape = c.shape[1:], kernel_initializer='he_normal')(c)
return c
def ConvNetRough(x,y,z):
inputs = keras.layers.Input((x,y,z, 1))
p0 = inputs
c1, p1 = down_block(p0, 32, 0.1) #128 -> 64
print(p1.shape)
c2, p2 = down_block(p1, 64, 0.1) #64 -> 32
c3, p3 = down_block(p2, 128, 0.2) #32 -> 16
bn = bottleneck(p3, 256, 0.4)
print(bn.shape)
u1 = up_block(bn, c3, 128, 0.3) #16 -> 32
u2 = up_block(u1, c2, 64, 0.2) #16 -> 64
u3 = up_block(u2, c1, 32, 0.1) #32 -> 128
outputs = tf.keras.layers.Conv3D(1, (1, 1, 1),padding='same', activation="sigmoid")(u3)
print("out")
print(outputs.shape)
model = keras.models.Model(inputs, outputs)
return model
output_image_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/image ct visualizations/Machine Learning 2 models test"
image_size = 128
brain_seg_model_top = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_top.h5"
brain_seg_model_top2 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_top.h5"
brain_seg_model_front = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_front.h5"
brain_seg_model_side = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_side.h5"
brain_seg_model_edges = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_edges.h5"
tumor_seg_model = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_tumor.h5"
input_path = "C:/Users/JiangQin/Documents/data/raw ct files/QIN GBM Treatment Response"
output_path = "C:/Users/JiangQin/Documents/data/raw ct files/QIN GBM Treatment Response/loaded arrays 2"
input_path = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain"
path = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain/Clinical data/ACRIN-DSC-MR-Brain TCIA Anonymized"
path2 = "C:/Users/JiangQin/Documents/data/raw ct files/ACRIN-DSC-MR-Brain/Clinical data/ACRIN-DSC-MR-Brain-HB TCIA Anonymized"
alphabet = ["A","B","C","D"]
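# Walks the dataset folders, picks the axial FLAIR, post-contrast T1 and T2 series for each visit,
# looks the visit date up in the clinical CSVs (M0..M9, MA..MD) to obtain the progression label and
# tumour zone codes, then pairs each visit with the preceding time point. Appends
# [[old flair, old t1, old t2], [new flair, new t1, new t2], progression] to `datasets`.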
def load_sets(input_path, clinical_data_path, datasets=None):
    # avoid the shared-mutable-default-argument pitfall
    if datasets is None:
        datasets = []
bru = 0
oof=0
valid_indexes = []
scans = []
for set_ in os.listdir(input_path):
set_path = input_path + "/" + set_
scans = []
scan_dates = []
try:
set_num = int(set_[-3:])
for scan in os.listdir(set_path):
flair = None
t1 = None
t2 = None
scan_path = set_path + '/' + scan
if os.path.isdir(scan_path):
for mri in os.listdir(scan_path):
if "t2" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "trace" not in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if t2!=None:
bru+=1
t2 = mri
if "t1" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "post" in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if t1!=None:
bru+=1
t1 = mri
if "flair" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "t1" not in mri.lower() and os.path.isdir(scan_path + "/" + mri):
if flair!=None:
bru+=1
flair = mri
if flair is not None and t1 is not None and t2 is not None:
date = dicom.read_file(scan_path + "/" + flair+"/"+os.listdir(scan_path + "/" + flair)[0]).ClinicalTrialTimePointID
found = False
valid = False
for i in range(0,14):
try:
if i >= 10:
ia=alphabet[i%10]
else:
ia=i
data = []
blub = open(os.path.join(clinical_data_path,str("M"+str(ia))+".csv")).read()
lines = blub.split("\n")
del lines[0]
del lines[-1]
for n,line in enumerate(lines):
chars = line.split(",")
data.append([])
for char in chars:
try:
data[n].append(int(char))
except:
data[n].append(0)
data = np.stack(data)
sets = data[:,0]
dates = data[:,8]
if int(date) == data[:,8][sets.tolist().index(set_num)]:
print("uhh")
if data[:,43][sets.tolist().index(set_num)] != 0:
current_time = i
progression = int(data[:,43][sets.tolist().index(set_num)])
zones = np.stack([int(x) for x in list(data[:,31:40][sets.tolist().index(set_num)])])-1
found = True
break
except Exception as e:
pass
if found:
try:
print("found")
if current_time-1 >= 10:
ia=alphabet[(current_time-1)%10]
else:
ia=current_time-1
data = []
blub = open(os.path.join(clinical_data_path,str("M"+str(ia))+".csv")).read()
lines = blub.split("\n")
del lines[0]
del lines[-1]
for n,line in enumerate(lines):
chars = line.split(",")
data.append([])
for char in chars:
try:
data[n].append(int(char))
except:
data[n].append(0)
data = np.stack(data)
sets = data[:,0]
older_date = data[:,8][sets.tolist().index(set_num)]
for scan in os.listdir(set_path):
flair_old = None
t1_old = None
t2_old = None
scan_path_old = set_path + '/' + scan
if os.path.isdir(scan_path_old):
for mri in os.listdir(scan_path_old):
if "t2" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "trace" not in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if t2_old!=None:
bru+=1
t2_old = mri
if "t1" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "post" in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if t1_old!=None:
bru+=1
t1_old = mri
if "flair" in mri.lower() and "cor" not in mri.lower() and "sag" not in mri.lower() and "t1" not in mri.lower() and os.path.isdir(scan_path_old + "/" + mri):
if flair_old!=None:
bru+=1
flair_old = mri
if flair_old is not None and t1_old is not None and t2_old is not None:
date = dicom.read_file(scan_path_old + "/" + flair_old+"/"+os.listdir(scan_path_old + "/" + flair_old)[0]).ClinicalTrialTimePointID
old_zones = np.stack([int(x) for x in list(data[:,31:40][sets.tolist().index(set_num)])])-1
if int(older_date) == int(date):
if not np.array_equal(zones,old_zones):
print(zones,old_zones)
oof+=1
datasets.append([[scan_path_old + "/" + flair_old,scan_path_old + "/" + t1_old,scan_path_old + "/" + t2_old],
[scan_path + "/" + flair,scan_path + "/" + t1,scan_path + "/" + t2], progression])
break
except Exception as e:
print(e)
pass
except Exception as e:
print("bub",e)
pass
print(oof)
return datasets
sets = load_sets(input_path,path)
sets = load_sets(input_path,path2,sets)
binary_model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Tumor seg binary with t1ce t2 flair/Model 16.h5"
binary_model_path2 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_tumor.h5"
binary_model_path3 = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/Model 34.h5"
semantic_model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Tumor seg semantic 64x with t1ce t2 flair/Model 81.h5"
brain_seg_t1_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
start = 8
responses = np.stack([0,0,0,0])
print(len(sets))
print("hmmm")
input()
for Set in range(start, len(sets)):
print("\n\nSet " + str(Set) + "\n\n")
for i in range(0,10):
print("\nGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\n")
print(sets[Set])
old_mr_path_flair = sets[Set][0][0]
flair,factor,translate = preprocess_data(old_mr_path_flair)
flair_binary = np.array(flair >0.1,dtype = "float64")
write_images(flair_binary, output_image_path)
flair_binary_image_og = sitk.GetImageFromArray(flair_binary)
old_mr_path_t1 = sets[Set][0][1]
t1,_,_ = preprocess_data(old_mr_path_t1,factor,translate)
old_mr_path_t2 = sets[Set][0][2]
t2,_,_ = preprocess_data(old_mr_path_t2,factor,translate)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path+"/treatment eval testing/"+str(Set)+"/image_full")
##flair brain seg
print("tranformed arrays",np.max(flair),np.max(t1),np.max(t2))
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
brain_mask_top = brain_seg_top.predict(np.stack([np.stack([flair], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
binary_brain_wo_median_combined = combine_zeros(segmentations)
median_flair = find_median_grayscale(flair[binary_brain_wo_median_combined > 0])
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
new_array_top = np.stack([np.stack([flair], axis = -1)])/(median_flair/0.2)
brain_mask_top = brain_seg_top.predict(new_array_top)
binary_brain_top = binarize(brain_mask_top, 0.7)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined2 = combine_zeros(segmentations)
##t1 brain seg
segmentations = []
model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
brain_seg = ConvNetRough(128,128,128)
brain_seg.load_weights(model_path)
brain_mask_top = brain_seg.predict(np.stack([np.stack([t1], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
print(np.stack([np.stack([t1], axis = -1)]).shape)
binary_brain_wo_median_combined = combine_zeros(segmentations)
only_brain_t1 = t1.copy()
only_brain_t1[binary_brain_wo_median_combined == 0] = 0
median_t1 = find_median_grayscale(only_brain_t1)
segmentations = []
model_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/code files/Saved models/Brain Seg OASIS 36 top view/Model 51 (2).h5"
brain_seg = ConvNetRough(128,128,128)
brain_seg.load_weights(model_path)
brain_mask_top = brain_seg.predict(np.stack([np.stack([t1/(median_t1/0.3)], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined1 = combine_zeros(segmentations)
#write_images(binary_brain_final_combined1, output_image_path+"/treatment eval testing/"+str(Set)+"/imageblub")
#input("HUDADAWUBUDAWUP")
##t2 brain seg
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
brain_mask_top = brain_seg_top.predict(np.stack([np.stack([t2], axis = -1)]))
binary_brain_top = binarize(brain_mask_top, 0.5)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
print("segmented brain")
binary_brain_wo_median_combined = combine_zeros(segmentations)
median = find_median_grayscale(t2[binary_brain_wo_median_combined > 0])
segmentations = []
brain_seg_top = load_model(brain_seg_model_top)
new_array_top = np.stack([np.stack([t2], axis = -1)])/(median/0.2)
brain_mask_top = brain_seg_top.predict(new_array_top)
binary_brain_top = binarize(brain_mask_top, 0.7)
binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = -1)
segmentations.append(binary_brain_top_top_ized)
binary_brain_final_combined = binary_brain_final_combined2.copy()#combine_zeros(segmentations)
binary_brain_final_combined[binary_brain_final_combined1 > 0] = 1
#binary_brain_final_combined[binary_brain_final_combined1 < 1] = 0
write_images(binary_brain_final_combined, output_image_path+"/treatment eval testing/"+str(Set)+"/imageblub")
#-------------
flair[binary_brain_final_combined==0] = 0
t1[binary_brain_final_combined==0] = 0
t2[binary_brain_final_combined==0] = 0
flair,t1,t2 = normalize(flair,t1,t2)
t1 /=(find_median_grayscale(t1)/0.2)
t2 /=(find_median_grayscale(t2)/0.2)
flair /=(find_median_grayscale(flair)/0.2)
write_images(np.stack([flair,t1,t2],axis=-1), output_image_path+"/treatment eval testing/"+str(Set)+"/image_brain")
only_brain = np.stack([flair,t1,t2],axis = -1)
only_brain = skimage.transform.rescale(only_brain, (0.5,0.5,0.5,1))
write_images(only_brain, output_image_path+"/treatment eval testing/"+str(Set)+"/imagebraib")
tumor_seg_binary = load_model(binary_model_path2)
tumor_mask = tumor_seg_binary.predict(np.stack([np.stack([flair/(median_flair/0.3)],axis=-1)]))
tumor_binary = np.squeeze(tumor_mask[0] > 0.9, axis = -1)
tumor_seg_channeled = ConvNetSemantic64(64,64,64)
tumor_seg_channeled.load_weights(semantic_model_path)
tumor_mask = tumor_seg_channeled.predict(np.stack([only_brain]))
print(tumor_mask.shape)
        print(np.max(tumor_mask))
import argparse
import datetime
import itertools
import os
import statistics
from collections import Counter
import numpy
import pandas
import plotly.express as px
import plotly.graph_objs as go
import plotly.io as pio
import scipy
import statsmodels.api as sm
from jsonlines import jsonlines
from nltk import AnnotationTask, interval_distance, binary_distance
from scipy.signal import find_peaks
from scipy.stats import kendalltau, pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef
px.defaults.height = 1000
sentiment_columns = ["sentiment", "textblob_sentiment", "vader_sentiment"]
parser = argparse.ArgumentParser(
description='Annotations statistical analysis and comparisons with the Story output predictions.')
parser.add_argument('--batch-stats', required=False, type=str, help="CSV of the prediction batch stats.")
parser.add_argument('--position-stats', required=False, type=str, help="The per sentence prediction output.")
parser.add_argument('--annotation-stats', required=False, nargs='+', type=str, help="CSVs of the story annotation batch stats.")
parser.add_argument('--mturk-sentence-annotations', required=False, nargs='+', type=str, help="CSV export from Mechanical Turk.")
parser.add_argument('--firebase-sentence-annotations', required=False, type=str,
help="JSONL export of sentence annotations from Firebase.")
parser.add_argument("--no-html-plots", default=False, action="store_true", help="Don't save plots to HTML")
parser.add_argument("--no-pdf-plots", default=False, action="store_true", help="Don't save plots to PDF")
parser.add_argument('--output-dir', required=True, type=str, help="Directory to write the analysis plots and CSVs to.")
parser.add_argument('--peak-prominence-weighting', required=False, type=float, default=1.0,
help="The peak prominence weighting.")
parser.add_argument('--peak-width', default=1.0, type=float,
help="How wide must a peak be to be included. 1.0 allow a single point sentence to be a peak.")
parser.add_argument('--min-time', type=int, default=150, help="Min time in seconds.")
parser.add_argument('--exclude-worker-ids', type=str, nargs="+", required=False, help="A list of workers to exclude from the task.")
args = parser.parse_args()
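# Example invocation (illustrative only; the script name and file names are placeholders):
#   python <this_script>.py --position-stats position_stats.csv \
#       --annotation-stats batch_1.csv batch_2.csv \
#       --mturk-sentence-annotations mturk_results.csv \
#       --firebase-sentence-annotations sentence_annotations.jsonl \
#       --output-dir ./analysis_output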
genre_other = ["fantasy", "fable", "science_fiction", "fairytale"]
genre_categories = ['Answer.crime.on',
'Answer.erotic_fiction.on', 'Answer.fable.on', 'Answer.fairytale.on',
'Answer.fan_fiction.on', 'Answer.fantasy.on', 'Answer.folktale.on',
'Answer.historical_fiction.on', 'Answer.horror.on', 'Answer.humor.on',
'Answer.legend.on', 'Answer.magic_realism.on', 'Answer.meta_fiction.on',
'Answer.mystery.on', 'Answer.mythology.on', 'Answer.mythopoeia.on',
'Answer.other.on',
'Answer.realistic_fiction.on', 'Answer.science_fiction.on',
'Answer.swashbuckler.on', 'Answer.thriller.on']
other_col = 'Answer.other.on'
story_id_col = 'Answer.storyId'
worker_id_col = 'WorkerId'
annotation_columns = ['Answer.doxaResonance',
'Answer.doxaSurprise', 'Answer.doxaSuspense',
'Answer.readerEmotionalResonance',
'Answer.readerSurprise', 'Answer.readerSuspense',
'Answer.storyInterest', 'Answer.storySentiment']
genre_column = "genre"
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
print(f"Create directory: {directory}")
os.makedirs(directory)
annotation_stats_columns = ['Answer.doxaResonance',
'Answer.doxaSurprise', 'Answer.doxaSuspense',
'Answer.readerEmotionalResonance',
'Answer.readerSurprise', 'Answer.readerSuspense',
'Answer.storyInterest', 'Answer.storySentiment',
]
annotation_story_id_column = 'Input.story_id'
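# Maps a measure name to a plotting bucket: sentiment, l1, l2, entropy or other.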
def label_bucket(row, attribute="measure"):
to_label = row[attribute]
if to_label in sentiment_columns:
return "sentiment"
elif "_l1" in to_label:
return "l1"
elif "_l2" in to_label:
return "l2"
elif "_l1" in to_label:
return "l1"
elif "_entropy" in to_label:
return "entropy"
else:
return "other"
def prediction_peaks(args, annotation_df, position_df):
ensure_dir(f"{args['output_dir']}/prediction_peaks/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/self_correlation/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/annotation_correlation/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/annotation_correlation/heatmap/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/annotation_correlation/scatter/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/multi/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/scatter/")
ensure_dir(f"{args['output_dir']}/prediction_peaks/box/")
position_df["sentence_text"] = position_df["sentence_text"].astype(str)
peaks_list = []
peaks_summary_list = []
story_ids = position_df["story_id"].unique()
columns = extract_position_measure_columns(position_df)
columns = list(set(columns).difference({"sentence_num"}))
columns.sort()
group_to_column_dict = {}
for column_groups in ['l1', 'l2', 'entropy', 'baseline', 'sentiment']:
column_list = []
for i, pred in enumerate(columns):
if column_groups not in pred and column_groups != "baseline" or (
column_groups is "baseline" and "overlap" not in pred and "embedding" not in pred):
continue
column_list.append(pred)
group_to_column_dict[column_groups] = column_list
column_group_dict = {}
for k, v in group_to_column_dict.items():
for c in v:
column_group_dict[c] = k
prom_dict = {}
for y_axis_group, y_axis_columns in group_to_column_dict.items():
prom_data = []
for c in y_axis_columns:
prom_data.extend(position_df[c].tolist())
prominence_threshold = statistics.stdev(prom_data) * args["peak_prominence_weighting"]
prom_dict[y_axis_group] = prominence_threshold
print(f"Peak prominance {prominence_threshold}")
for story_id in story_ids:
peak_story_row = position_df.loc[position_df["story_id"] == story_id]
peak_story_row = peak_story_row.sort_values(by=["sentence_num"])
for m in columns:
# print(story_df[["sentence_num","sentence_text",c]])
x = peak_story_row[m].to_list()
peak_list, summary_dict = find_and_extract_peaks(peak_story_row, "peak", x, m,
                                                             prominence=prom_dict[column_group_dict[m]],
width=args["peak_width"])
peaks_list.extend(peak_list)
peaks_summary_list.append(summary_dict)
x_inverted = [x_n * -1.0 for x_n in x]
trough_list, summary_dict = find_and_extract_peaks(peak_story_row, "trough", x_inverted, m,
                                                               prominence=prom_dict[column_group_dict[m]],
width=args["peak_width"])
peaks_list.extend(trough_list)
peaks_summary_list.append(summary_dict)
peaks_df = pandas.DataFrame(peaks_list)
peaks_df.to_csv(f"{args['output_dir']}/prediction_peaks/peaks.csv")
print(peaks_df.columns)
peaks_summary_df = pandas.DataFrame(peaks_summary_list)
peaks_summary_df.to_csv(f"{args['output_dir']}/prediction_peaks/peaks_summary.csv")
print(peaks_summary_df.columns)
peaks_df['c'] = peaks_df.apply(lambda row: label_bucket(row), axis=1)
peaks_summary_df['c'] = peaks_summary_df.apply(lambda row: label_bucket(row), axis=1)
for c in peaks_df['c'].unique():
display_df = peaks_df.loc[peaks_df['c'] == c]
for m in ["prominences", "widths"]:
fig = px.box(display_df, y=m, x="measure", color="type", notched=True)
export_plots(args, f"/prediction_peaks/box/{m}_{c}_box", fig)
fig = px.scatter(display_df, x="prominences", y="widths", color="type",
hover_name="sentence_text")
export_plots(args, f"/prediction_peaks/scatter/{c}_peak", fig)
# Calculate summary statistics for the peaks
for c in peaks_summary_df['c'].unique():
display_df = peaks_summary_df.loc[peaks_summary_df['c'] == c]
for m in ["num_of_peaks", "prominence", "width", "importance"]:
fig = px.box(display_df, y=m, x="measure", color="type", notched=True)
export_plots(args, f"/prediction_peaks/box/{m}_{c}_summary", fig)
for m in ["peak", "trough"]:
bubble_df = display_df.loc[display_df["type"] == m]
fig = px.scatter(bubble_df, x="num_of_peaks", y="width", size="prominence", color="measure", size_max=100,
hover_name="story_id")
export_plots(args, f"/prediction_peaks/scatter/{c}_{m}_summary", fig)
for c in columns:
for t in ["peak", "trough"]:
plot_df = peaks_summary_df.loc[peaks_summary_df["type"] == t]
plot_df = plot_df.loc[plot_df["measure"] == c]
fig = px.parallel_coordinates(plot_df, dimensions=["num_of_peaks", "width", "prominence", "importance"],
color="story_id", color_continuous_scale=px.colors.diverging.Tealrose, )
export_plots(args, f"/prediction_peaks/multi/{c}_{t}_parallel_summary", fig)
polar_list = []
for index, row in plot_df.iterrows():
print(row)
for c in ["num_of_peaks", "prominence", "width", "importance"]:
polar_list.append(
{"story_id": row["story_id"], "type": row["type"], "measure": row["measure"], "theta": c,
"r": row[c]})
if len(polar_list) == 0:
continue
# plot_polar_df = pandas.DataFrame(polar_list)
# print(plot_polar_df)
# fig = px.line_polar(plot_polar_df, r="r", theta="theta",
# color="story_id", line_close=True)
export_plots(args, f"/prediction_peaks/multi/{c}_{t}_polar", fig)
full_table_list = []
for m in ["num_of_peaks", "width", "prominence", "importance"]:
for m2 in ["num_of_peaks", "width", "prominence", "importance"]:
for t in ["peak", "trough"]:
for t2 in ["peak", "trough"]:
table_list = []
pear_corr = []
ken_corr = []
spear_corr = []
for c in columns:
pear_corr_ann = []
ken_corr_ann = []
spear_corr_ann = []
for c2 in columns:
type_x_df = peaks_summary_df.loc[peaks_summary_df["type"] == t]
x_df = type_x_df.loc[type_x_df["measure"] == c]
x_list = list(x_df[m])
type_y_df = peaks_summary_df.loc[peaks_summary_df["type"] == t2]
y_df = type_y_df.loc[type_y_df["measure"] == c2]
y_list = list(y_df[m2])
kendall, pearson, spearman = calculate_correlation(c, c2, table_list, x_list, y_list,
measure=m, measure2=m2, disc=t, disc2=t2)
pear_corr_ann.append(pearson)
spear_corr_ann.append(spearman)
ken_corr_ann.append(kendall)
pear_corr.append(pear_corr_ann)
ken_corr.append(ken_corr_ann)
spear_corr.append(spear_corr_ann)
full_table_list.extend(table_list)
export_correlations(args, f"/prediction_peaks/self_correlation/{m}_{m2}_{t}_{t2}_", columns,
columns, ken_corr, pear_corr,
spear_corr,
table_list)
full_table_df = pandas.DataFrame(full_table_list)
full_table_df.to_csv(f"{args['output_dir']}/prediction_peaks/self_correlation/all_correlation.csv")
agg_annotation_df = aggregate_annotations_df(annotation_df)
annotation_measures = list(agg_annotation_df['name'].unique())
annotation_measures.sort()
story_ids = annotation_df[annotation_story_id_column].unique()
story_ids = [int(s) for s in story_ids]
story_ids.sort()
full_table_list = []
for m in ["num_of_peaks", "width", "prominence", "importance"]:
for t in ["peak", "trough"]:
for b in peaks_df['c'].unique():
table_list = []
pear_corr = []
ken_corr = []
spear_corr = []
peaks_group_df = peaks_summary_df.loc[peaks_summary_df["c"] == b]
columns_in_group = list(set(columns).intersection(set(peaks_group_df["measure"].unique())))
for c in sorted(columns_in_group):
pear_corr_ann = []
ken_corr_ann = []
spear_corr_ann = []
prediction_list = []
for am in annotation_measures:
x_list = []
y_list = []
for story_id in story_ids:
agg_story_row = agg_annotation_df.loc[agg_annotation_df["story_id"] == story_id]
peak_story_row = peaks_group_df.loc[peaks_group_df["story_id"] == story_id]
if len(agg_story_row) == 0 or len(peak_story_row) == 0:
continue
pred_dict = {}
pred_dict["story_id"] = story_id
pred_dict["z"] = am
pred_row = agg_story_row.loc[agg_story_row["name"] == am]
y = float(pred_row.iloc[0]["mean"])
type_x_df = peak_story_row.loc[peak_story_row["type"] == t]
x_df = type_x_df.loc[type_x_df["measure"] == c]
x = float(x_df[m])
pred_dict["x"] = x
x_list.append(x)
pred_dict["y"] = y
y_list.append(y)
prediction_list.append(pred_dict)
if len(x_list) >= 2 and len(y_list) >= 2:
kendall, pearson, spearman = calculate_correlation(c, am, table_list, x_list, y_list,
measure=m, measure2="mean", disc=t)
print(c, am, kendall, pearson, spearman)
else:
kendall = pearson = spearman = 0.0
pear_corr_ann.append(pearson)
spear_corr_ann.append(spearman)
ken_corr_ann.append(kendall)
pear_corr.append(pear_corr_ann)
ken_corr.append(ken_corr_ann)
spear_corr.append(spear_corr_ann)
point_df = pandas.DataFrame(data=prediction_list)
fig = px.scatter(point_df, x="x", y="y", color="z", trendline="lowess", hover_name="story_id")
export_plots(args, f"/prediction_peaks/annotation_correlation/scatter/{b}_{m}_{t}_{c}_", fig)
full_table_list.extend(table_list)
export_correlations(args, f"/prediction_peaks/annotation_correlation//heatmap/{b}_{m}_{t}_",
annotation_stats_columns,
columns_in_group, ken_corr, pear_corr,
spear_corr,
table_list)
full_table_df = pandas.DataFrame(full_table_list)
full_table_df.to_csv(f"{args['output_dir']}/prediction_peaks/annotation_correlation/all_correlation.csv")
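# Runs find_peaks over the series x for measure c and returns (per-peak dicts with sentence info,
# per-story summary with peak count and summed prominence/width/importance). If no peak is found,
# a zero-valued placeholder anchored on the first sentence is returned.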
def find_and_extract_peaks(story_df, type, x, c, prominence=1.0, width=1.0):
story_peak_summary = {}
peaks, peaks_meta = find_peaks(x, width=width, prominence=prominence)
print(story_df.columns)
print(type, prominence, width, peaks, peaks_meta)
peak_list = [dict(zip(peaks_meta, i)) for i in zip(*peaks_meta.values())]
sentence_ids = story_df["sentence_id"].to_list()
story_peak_summary["story_id"] = story_df["story_id"].unique()[0]
story_peak_summary["type"] = type
story_peak_summary["num_of_peaks"] = len(peak_list)
story_peak_summary["prominence"] = 0.0
story_peak_summary["width"] = 0.0
story_peak_summary["importance"] = 0.0
story_peak_summary["story_length"] = len(sentence_ids)
story_peak_summary["measure"] = c
for index, peak in zip(peaks, peak_list):
sentence_id = sentence_ids[index]
row = story_df.loc[story_df["sentence_id"] == sentence_id]
peak["story_id"] = int(row["story_id"])
peak["sentence_id"] = int(row["sentence_id"])
peak["sentence_num"] = int(row["sentence_num"])
peak["sentence_text"] = row["sentence_text"].values[0]
peak["type"] = type
peak["measure"] = c
story_peak_summary["prominence"] += peak["prominences"]
story_peak_summary["width"] += peak["widths"]
story_peak_summary["importance"] += peak["prominences"] * peak["widths"]
if len(peak_list) == 0:
row = story_df.loc[story_df["sentence_id"] == sentence_ids[0]]
peak = {}
peak["story_id"] = int(row["story_id"])
peak["sentence_id"] = int(row["sentence_id"])
peak["sentence_num"] = int(row["sentence_num"])
peak["sentence_text"] = row["sentence_text"]
peak["type"] = type
peak["measure"] = c
peak["widths"] = 0.0
peak["prominences"] = 0.0
peak["num_of_peaks"] = 0
peak_list.append(peak)
return peak_list, story_peak_summary
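# Majority vote over the genre checkboxes per story (rows with 'other' ticked are ignored);
# genres outside the shortlist are collapsed into "other". Plots a bar chart and returns a
# story_id -> genre DataFrame.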
def genres_per_story(args, annotation_df):
ensure_dir(f"{args['output_dir']}/genres/")
story_ids = annotation_df[story_id_col].unique()
annotation_no_others_df = annotation_df.loc[annotation_df[other_col] == False]
story_genre_data = []
for story_id in story_ids:
story_df = annotation_no_others_df.loc[annotation_no_others_df[story_id_col] == story_id]
story_genre_dict = {}
for c in genre_categories:
c_true_df = story_df.loc[story_df[c] == True]
if not c_true_df.empty:
story_genre_dict[c] = len(c_true_df)
story_genre_counter = Counter(story_genre_dict)
story_genre_list = story_genre_counter.most_common(1)
if len(story_genre_list) > 0:
story_genre, _ = story_genre_list[0]
story_genre = story_genre.replace("Answer.", "").replace(".on", "")
if story_genre not in genre_other:
story_genre = "other"
else:
story_genre = "other"
story_genre_data.append({"story_id": story_id, "genre": story_genre})
genre_df = pandas.DataFrame(data=story_genre_data)
genre_sum_df = genre_df.groupby('genre', as_index=False).count().rename(columns={"story_id": "count"})
fig = px.bar(genre_sum_df, x="genre", y="count")
export_plots(args, "/genres/bar", fig)
return genre_df
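# For each measure computes Krippendorff's alpha (interval distance) overall, per story and per
# worker, plus one-vs-rest Kendall/Pearson/Spearman correlations of each worker against the mean
# of the remaining workers, and writes the agreement CSVs.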
def inter_annotator_agreement(merged_sentence_df, args):
print("Calculate Interannotator Agreement")
ensure_dir(f"{args['output_dir']}/agreement/")
# Calculate inter-annotator agreement.
measure_cols = ["suspense","duration_milliseconds"]
agreement_list = []
worker_agreement_list = []
story_agreement_list = []
for c in measure_cols:
agreement_dict = {}
agreement_triples = []
for i, row in merged_sentence_df.iterrows():
value = row[c]
agreement_triples.append((str(row["worker_id"]), str(row["sentence_id"]), int(value)))
print(f"Pairs for agreement: {len(agreement_triples)}")
dist = interval_distance
t = AnnotationTask(data=agreement_triples, distance=dist)
agreement_dict["alpha"] = t.alpha()
worker_ids = merged_sentence_df["worker_id"].unique()
kendall_list = []
pearson_list = []
spearman_list = []
worker_items = []
for story_id in merged_sentence_df["story_id"].unique():
story_agreement_triples = []
story_agreement_dict = {"story_id": story_id}
story_agreement_dict["measure"] = c
x_list = []
y_list = []
story_df = merged_sentence_df.loc[merged_sentence_df["story_id"] == story_id]
story_sentences = story_df["sentence_id"].unique()
story_workers = story_df["worker_id"].unique()
for worker in story_workers:
for sentence in story_sentences:
sentence_df = story_df.loc[story_df["sentence_id"] == sentence]
worker_value = sentence_df.loc[sentence_df["worker_id"] == worker][c].values
story_agreement_dict["num_of_workers"] = len(sentence_df["worker_id"].unique())
story_agreement_triples.append((str(worker), str(sentence), int(worker_value[0])))
exclude_df = sentence_df.loc[sentence_df["worker_id"] != worker]
mean_value = exclude_df.groupby("sentence_id", as_index=False).mean()[c].values
if len(mean_value) > 0 and len(worker_value) > 0:
x_list.append(float(worker_value[0]))
y_list.append(float(mean_value[0]))
if len(x_list) >= 2 and len(y_list) == len(x_list):
story_agreement_dict["num_of_judgements"] = len(x_list)
kendall, kendall_p_value = kendalltau(x_list, y_list)
if not numpy.isnan(kendall):
story_agreement_dict["kendall"] = kendall
story_agreement_dict["kendall_p_value"] = kendall_p_value
pearson, pearson_p_value = pearsonr(x_list, y_list)
if not numpy.isnan(pearson):
story_agreement_dict["pearson"] = pearson
story_agreement_dict["pearson_p_value"] = pearson_p_value
spearman, spearman_p_value = spearmanr(x_list, y_list)
if not numpy.isnan(spearman):
story_agreement_dict["spearman"] = spearman
story_agreement_dict["spearman"] = spearman_p_value
summary_statistics(x_list, c, story_agreement_dict)
if len(story_agreement_triples) > 0 and len(set([t[0] for t in story_agreement_triples])) > 1:
t = AnnotationTask(data=story_agreement_triples, distance=dist)
story_agreement_dict["alpha"] = t.alpha()
story_agreement_list.append(story_agreement_dict)
for worker in worker_ids:
worker_agreement_triples = []
worker_dict = {"worker_id": worker}
x_list = []
y_list = []
worker_df = merged_sentence_df.loc[merged_sentence_df["worker_id"] == worker]
worker_sentences = worker_df["sentence_id"].unique()
worker_dict["num_of_stories"] = len(worker_df["story_id"].unique())
exclude_df = merged_sentence_df.loc[merged_sentence_df["worker_id"] != worker]
means_df = exclude_df.groupby("sentence_id", as_index=False).mean()
for sentence in worker_sentences:
mean_col = c
mean_value = means_df.loc[means_df["sentence_id"] == sentence][mean_col].values
worker_value = worker_df.loc[worker_df["sentence_id"] == sentence][mean_col].values
if len(mean_value) == 1 and len(worker_value) == 1:
worker_agreement_triples.append((str(worker), str(sentence), int(worker_value)))
worker_agreement_triples.append((str("other"), str(sentence), int(round(float(mean_value),0))))
x_list.append(float(worker_value))
y_list.append(float(mean_value))
if len(x_list) >= 2 and len(y_list) == len(x_list):
worker_dict["num_of_judgements"] = len(x_list)
diff = [x - y for x, y in zip(x_list, y_list)]
nobs, minmax, mean, variance, skew, kurtosis = scipy.stats.describe(diff)
worker_dict["diff_mean"] = mean
worker_dict["diff_var"] = variance
abs_diff = [abs(x) for x in diff]
nobs, minmax, mean, variance, skew, kurtosis = scipy.stats.describe(abs_diff)
worker_dict[f"abs_diff_mean"] = mean
worker_dict[f"abs_diff_var"] = variance
kendall, kendall_p_value = kendalltau(x_list, y_list)
if not numpy.isnan(kendall):
kendall_list.append(kendall)
worker_dict["kendall"] = kendall
pearson, pearson_p_value = pearsonr(x_list, y_list)
if not numpy.isnan(pearson):
pearson_list.append(pearson)
worker_dict["pearson"] = pearson
worker_dict["pearson_p_value"] = pearson_p_value
spearman, spearman_p_value = spearmanr(x_list, y_list)
if not numpy.isnan(spearman):
spearman_list.append(spearman)
worker_dict["spearman"] = spearman
worker_dict["spearman_p_value"] = spearman_p_value
worker_items.append(len(x_list))
t = AnnotationTask(data=worker_agreement_triples, distance=dist)
worker_dict["alpha"] = t.alpha()
summary_statistics(x_list, c, worker_dict)
worker_dict["measure"] = c
worker_agreement_list.append(worker_dict)
total_items = float(sum(worker_items))
probabilities = [p / total_items for p in worker_items]
agreement_dict[f"kendall_1_vs_all"] = sum([i * p for i, p in zip(kendall_list, probabilities)])
agreement_dict[f"spearman_1_vs_all"] = sum([i * p for i, p in zip(spearman_list, probabilities)])
agreement_dict[f"pearson_1_vs_all"] = sum([i * p for i, p in zip(pearson_list, probabilities)])
agreement_dict["measure"] = c
agreement_list.append(agreement_dict)
agreement_df = pandas.DataFrame(data=agreement_list)
agreement_df.to_csv(f"{args['output_dir']}/agreement/agreement.csv")
worker_agreement_df = pandas.DataFrame(data=worker_agreement_list)
worker_agreement_df = worker_agreement_df.sort_values(by=["alpha"], ascending=False)
worker_agreement_df.to_csv(f"{args['output_dir']}/agreement/worker_agreement.csv")
story_agreement_df = pandas.DataFrame(data=story_agreement_list)
story_agreement_df = story_agreement_df.sort_values(by=["alpha"], ascending=False)
story_agreement_df.to_csv(f"{args['output_dir']}/agreement/story_agreement.csv")
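# Plots, per story, each worker's cumulative suspense curve (relative judgements converted to step
# sizes, starting from 100) together with the per-sentence median curve, and writes the raw
# judgements to consensus/judgement.csv.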
def plot_annotator_sentences(merged_sentence_df, args):
print(f"Plot the annotator sentences to get a visualisation of the peaks in the annotations.")
ensure_dir(f"{args['output_dir']}/consensus/")
judgement_data_list = []
story_ids = merged_sentence_df["story_id"].unique()
for story_id in story_ids:
print(story_id)
story_df = merged_sentence_df.loc[merged_sentence_df["story_id"] == story_id]
story_df = story_df.groupby(['story_id', 'sentence_id', 'sentence_num', 'worker_id'],
as_index=False).first()
if len(story_df) > 0:
print(story_df)
worker_ids = story_df["worker_id"].unique()
data = []
for worker_id in worker_ids:
worker_df = story_df.loc[story_df["worker_id"] == worker_id]
sel_col_df = worker_df[['story_id', 'sentence_id', 'sentence_num', 'worker_id', 'suspense']]
judgement_data_list.append(sel_col_df)
if len(worker_df) > 0:
worker_df = worker_df.sort_values(by=["sentence_num"])
value_series = []
value_series.append(100)
for s in worker_df["suspense"][1:]:
value = relative_to_abs_plot(s)
value_series.append(value_series[-1] + value)
trace = go.Scatter(
x=worker_df["sentence_num"],
y=value_series,
mode='lines+markers',
name=f"{worker_id}",
)
data.append(trace)
median_df = story_df.groupby(["story_id", "sentence_id", "sentence_num"], as_index=False)[
['suspense']].median().round(0)
median_df["worker_id"] = "median"
mean_df = story_df.groupby(["story_id", "sentence_id", "sentence_num"], as_index=False)[
['suspense']].mean()
mean_df["worker_id"] = "mean"
value_series = []
value_series.append(100)
for s in median_df["suspense"][1:]:
value = relative_to_abs_plot(s)
value_series.append(value_series[-1] + value)
trace = go.Scatter(
x=median_df["sentence_num"],
y=value_series,
mode='lines+markers',
name=f"median",
)
data.append(trace)
layout = go.Layout(
title=f'Sentence Annotation Plot {story_id}',
hovermode='closest',
xaxis=dict(
# title='Position',
),
yaxis=dict(
title=f"Suspense",
),
showlegend=True,
legend=dict(
orientation="h")
)
fig = go.Figure(data=data, layout=layout)
judgement_data_list.append(median_df)
judgement_data_list.append(mean_df)
export_plots(args, f"/annotation_plots/{story_id}", fig)
for c in judgement_data_list:
print(c.columns)
judgement_df = pandas.concat(judgement_data_list)
judgement_df.to_csv(f"{args['output_dir']}/consensus/judgement.csv")
def relative_to_abs_plot(s):
if s == 1:
value = -25
elif s == 2:
value = -5
elif s == 3:
value = 0
elif s == 4:
value = 5
elif s == 5:
value = 25
else:
value = 0
return value
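# Flattens the Firebase sentence annotations into story- and sentence-level DataFrames, joins them
# with the MTurk assignment records, adds per-story summary statistics, and runs the agreement
# calculation and the per-story annotation plots.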
def sentence_annotation_stats_and_agreement(args, mturk_df, firebase_data):
sentence_stats_columns = ['suspense','duration_milliseconds','sentence_len']
ensure_dir(f"{args['output_dir']}/sentence_annotations_stats/")
mturk_df = mturk_df.loc[mturk_df["AssignmentStatus"].isin(["Submitted","Approved"])]
mturk_df = calculate_task_time(args, mturk_df)
story_level_data = []
sentence_level_data = []
for f in firebase_data:
story_dict = {}
story_dict["firebase_id"] = f["id"]
story_dict["firebase_collection"] = f["collection"]
document = f["document"]
for k in set(document.keys()).difference(set(["training_annotations","sentence_annotations"])):
story_dict[k] = document[k]
for sent in document["sentence_annotations"]:
sent_dict = {**sent, **story_dict}
print(sent_dict)
sentence_level_data.append(sent_dict)
for stats_column in sentence_stats_columns:
stat_data = [s[stats_column] for s in document["sentence_annotations"]]
summary_statistics(stat_data, stats_column, story_dict)
print(story_dict)
story_level_data.append(story_dict)
story_annotation_df = pandas.DataFrame(data=story_level_data)
sentence_annotation_df = pandas.DataFrame(data=sentence_level_data)
merged_story_df = pandas.merge(story_annotation_df, mturk_df, left_on='assignment_id', right_on='AssignmentId', how='inner')
merged_sentence_df = pandas.merge(sentence_annotation_df, mturk_df, left_on='assignment_id', right_on='AssignmentId',
how='inner')
#merged_story_df = merged_story_df.fillna(value=0)
#merged_sentence_df = merged_sentence_df.fillna(value=0)
inter_annotator_agreement(merged_sentence_df, args)
plot_annotator_sentences(merged_sentence_df, args)
merged_story_df.to_csv(f"{args['output_dir']}/sentence_annotations_stats/mturk_story.csv")
merged_sentence_df.to_csv(f"{args['output_dir']}/sentence_annotations_stats/mturk_sentence.csv")
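# Adds count, min/max, variance, skew, kurtosis and the 25th/50th/75th percentiles of stat_data to
# story_dict; for the suspense column it also adds a count per rating category.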
def summary_statistics(stat_data, stats_column, story_dict):
nobs, minmax, mean, variance, skew, kurtosis = scipy.stats.describe(stat_data)
story_dict[f"{stats_column}_num"] = nobs
story_dict[f"{stats_column}_min"], story_dict[f"{stats_column}_max"] = minmax
story_dict[f"{stats_column}_var"] = variance
story_dict[f"{stats_column}_skew"] = skew
story_dict[f"{stats_column}_kurt"] = kurtosis
story_dict[f"{stats_column}_perc_25"] = numpy.percentile(stat_data, 0.25)
story_dict[f"{stats_column}_perc_50"] = numpy.percentile(stat_data, 0.50)
story_dict[f"{stats_column}_perc_75"] = numpy.percentile(stat_data, 0.75)
if stats_column == "suspense":
stats, ref_dict = sm.tools.categorical(numpy.asarray(stat_data), dictnames=True, drop=True)
stats_summed = numpy.sum(stats, axis=0)
for k, v in ref_dict.items():
story_dict[f"{stats_column}_cat_{v}"] = stats_summed[k]
def calculate_task_time(args, mturk_df):
suspiciously_quick = []
task_time_taken = []
accept_time_col = mturk_df['AcceptTime']
submit_time_col = mturk_df['SubmitTime']
for accept_time, submit_time in zip(accept_time_col, submit_time_col):
mturk_date_format = "%Y-%m-%d %H:%M:%S%z"
accept_time = datetime.datetime.strptime(accept_time, mturk_date_format)
submit_time = datetime.datetime.strptime(submit_time, mturk_date_format)
time_taken = submit_time - accept_time
task_time_taken.append(time_taken)
if time_taken.seconds < args["min_time"]:
suspiciously_quick.append(True)
else:
suspiciously_quick.append(False)
mturk_df = mturk_df.assign(too_quick=pandas.Series(suspiciously_quick))
mturk_df = mturk_df.assign(total_task_duration=pandas.Series(task_time_taken))
return mturk_df
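# Entry point: loads whichever inputs were supplied (MTurk CSVs, Firebase JSONL, annotation stats,
# batch stats, per-sentence position stats), filters excluded workers, and dispatches to the
# relevant analyses.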
def story_stats_correlation(args):
annotation_df = None
position_df = None
pred_df = None
mturk_df = None
dfs = []
if args["mturk_sentence_annotations"] is not None and len(args["mturk_sentence_annotations"]) > 0:
for filename in args["mturk_sentence_annotations"]:
dfs.append(pandas.read_csv(filename))
mturk_df = pandas.concat(dfs, ignore_index=True)
firebase_data = []
if args["firebase_sentence_annotations"] is not None and len(args["firebase_sentence_annotations"]) > 0:
with jsonlines.open(args["firebase_sentence_annotations"]) as reader:
for obj in reader:
firebase_data.append(obj)
dfs = []
if args["annotation_stats"] is not None and len(args["annotation_stats"]) > 0:
for filename in args["annotation_stats"]:
dfs.append(pandas.read_csv(filename))
annotation_df = pandas.concat(dfs, ignore_index=True)
annotation_df = annotation_df.fillna(value=0.0)
annotation_df = map_to_binary_answers(annotation_df)
annotation_df = check_quality(annotation_df)
if args["batch_stats"] is not None and len(args["batch_stats"]) > 0:
pred_df = pandas.read_csv(args["batch_stats"])
pred_df = pred_df.fillna(value=0.0)
story_ids = pred_df["story_id"].to_list()
story_ids.sort()
if args["position_stats"] is not None and len(args["position_stats"]) > 0:
position_df = pandas.read_csv(args["position_stats"])
position_df = position_df.fillna(value=0.0)
    if mturk_df is not None:
        print(mturk_df.columns)
        if args["exclude_worker_ids"] is not None and len(args["exclude_worker_ids"]) > 0:
            mturk_df = mturk_df[~mturk_df["WorkerId"].isin(args["exclude_worker_ids"])]
if annotation_df is not None:
genres_per_story_df = genres_per_story(args, annotation_df)
annotation_correlation(args, annotation_df, genres_per_story_df)
if mturk_df is not None and len(firebase_data):
sentence_annotation_stats_and_agreement(args, mturk_df, firebase_data)
if annotation_df is not None and position_df is not None:
prediction_peaks(args, annotation_df, position_df)
if position_df is not None:
prediction_position_correlation(args, position_df)
if pred_df is not None:
prediction_correlation(args, pred_df)
if annotation_df is not None and pred_df is not None:
prediction_annotation_correlation(args, annotation_df, pred_df)
def prediction_position_correlation(args, position_df):
ensure_dir(f"{args['output_dir']}/prediction_sentence_correlation/")
ensure_dir(f"{args['output_dir']}/prediction_sentence_correlation/box/")
ensure_dir(f"{args['output_dir']}/prediction_sentence_correlation/scatter/")
ensure_dir(f"{args['output_dir']}/prediction_sentence_correlation/heatmap/")
# print(position_df)
columns = extract_position_measure_columns(position_df)
hor_list = []
for sent_id in position_df["sentence_id"].unique():
sent_id = int(sent_id)
sent_df = position_df.loc[position_df["sentence_id"] == sent_id]
for c in list(columns):
hor_dict = {}
hor_dict["sentence_id"] = sent_id
hor_dict["story_id"] = int(sent_df["story_id"])
hor_dict["value"] = float(sent_df[c])
hor_dict["measure"] = c
hor_list.append(hor_dict)
point_df = pandas.DataFrame(data=hor_list)
point_df['c'] = point_df.apply(lambda row: label_bucket(row), axis=1)
for c in point_df['c'].unique():
display_df = point_df.loc[point_df['c'] == c]
fig = px.box(display_df, y="value", x="measure", notched=True)
export_plots(args, f"/prediction_sentence_correlation/box/{c}_box", fig)
pear_corr = []
ken_corr = []
spear_corr = []
table_list = []
for c1 in columns:
prediction_list = []
pear_corr_ann = []
ken_corr_ann = []
spear_corr_ann = []
for c2 in columns:
if "Unnamed" in c1 or "Unnamed" in c2:
continue
# print(c1, c2)
x_series = list(position_df[c1])
y_series = list(position_df[c2])
sentence_ids = list(position_df["sentence_id"])
sentence_text = list(position_df["sentence_text"])
x_list = []
y_list = []
for s, t, x, y in zip(sentence_ids, sentence_text, x_series, y_series):
try:
x = float(x)
y = float(y)
x_list.append(x)
y_list.append(y)
pred_dict = {}
pred_dict["z"] = c2
pred_dict["x"] = x
pred_dict["y"] = y
pred_dict["sentence_id"] = s
pred_dict["sentence_text"] = t
prediction_list.append(pred_dict)
                except (TypeError, ValueError):
                    # skip rows whose values cannot be cast to float
                    pass
kendall, pearson, spearman = calculate_correlation(c1, c2, table_list, x_list, y_list)
pear_corr_ann.append(pearson)
spear_corr_ann.append(spearman)
ken_corr_ann.append(kendall)
pear_corr.append(pear_corr_ann)
ken_corr.append(ken_corr_ann)
spear_corr.append(spear_corr_ann)
point_df = pandas.DataFrame(data=prediction_list)
fig = px.scatter(point_df, x="x", y="y", title=f"{c1}", color="z", trendline="lowess",
hover_name="sentence_text", color_discrete_sequence=px.colors.cyclical.IceFire, )
export_plots(args, f"/prediction_sentence_correlation/scatter/{c1}", fig)
export_correlations(args, "/prediction_sentence_correlation/heatmap/", columns, columns, ken_corr, pear_corr,
spear_corr, table_list)
def export_correlations(args, base, columns, columns2, ken_corr, pear_corr, spear_corr, table_list):
for corr_type, d in zip(["pearson", "spearman", "kendall"], [pear_corr, spear_corr, ken_corr]):
# print(measure, d, pred_measures, annotation_stats_columns)
fig = go.Figure(data=go.Heatmap(
z=d,
x=columns,
y=columns2
))
export_plots(args, f"{base}_{corr_type}_heatmap", fig)
measure_correlation = pandas.DataFrame(table_list)
measure_correlation.to_csv(f"{args['output_dir']}/{base}_corr.csv")
def calculate_correlation(c1, c2, table_list, x_list, y_list, measure="value", measure2="value", disc=None, disc2=None):
pearson, pearson_p_value = pearsonr(x_list, y_list)
table_list.append(
{"c1": c1, "c2": c2, "type": "pearson",
"correlation": pearson,
"p_value": pearson_p_value, "measure": measure, "measure_2": measure2, "disc": disc, "disc_2": disc2})
kendall, kendall_p_value = kendalltau(x_list, y_list, nan_policy="omit")
table_list.append(
{"c1": c1, "c2": c2, "type": "kendall",
"correlation": kendall,
"p_value": kendall_p_value, "measure": measure, "measure_2": measure2, "disc": disc, "disc_2": disc2})
spearman, spearman_p_value = spearmanr(x_list, y_list)
table_list.append(
{"c1": c1, "c2": c2, "type": "spearman",
"correlation": spearman,
"p_value": spearman_p_value, "measure": measure, "measure_2": measure2, "disc": disc, "disc_2": disc2})
return kendall, pearson, spearman
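
# Illustrative usage sketch for calculate_correlation (not part of the original
# pipeline). It assumes pandas and the scipy.stats functions used above are
# imported at the top of this module, as the surrounding code already requires.
def _example_calculate_correlation():
    table_rows = []
    x = [1.0, 2.0, 3.0, 4.0, 5.0]
    y = [2.0, 3.9, 6.1, 8.0, 9.8]
    # Appends one row per correlation type (pearson, kendall, spearman) to table_rows.
    calculate_correlation("measure_a", "measure_b", table_rows, x, y)
    print(pandas.DataFrame(table_rows)[["c1", "c2", "type", "correlation", "p_value"]])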
def export_plots(args, file, fig):
ensure_dir(f"{args['output_dir']}/{file}")
if not args["no_html_plots"]:
file_path = f"{args['output_dir']}/{file}.html"
print(f"Save plot: {file_path}")
pio.write_html(fig, file_path)
if not args["no_pdf_plots"]:
file_path = f"{args['output_dir']}/{file}.pdf"
print(f"Save plot pdf: {file_path}")
pio.write_image(fig, file_path)
def extract_position_measure_columns(position_df):
columns = list(set(list(position_df.columns)).difference(
{"name", "story_id", "sentence_id", "sentence_text", "steps", "Unnamed: 0"}))
columns.sort()
columns = [c for c in columns if
not c.endswith('_1') and not c.endswith('_2') and not c.endswith('_3') and not c.endswith('_4')]
return columns
def map_likert_to_bin(row, col):
if row[col] < 3:
return -1
    else:  # row[col] >= 3
return 1
def map_to_binary_answers(annotation_df, cols=annotation_stats_columns):
for col in cols:
annotation_df[f'{col}_bin'] = annotation_df.apply(lambda row: map_likert_to_bin(row, col), axis=1)
print(annotation_df[[f'{col}', f'{col}_bin']])
return annotation_df
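
# Small hedged example of the Likert-to-binary mapping above (illustrative only;
# assumes pandas is imported at module level, as elsewhere in this file).
def _example_map_to_binary_answers():
    df = pandas.DataFrame({"suspense": [1, 2, 3, 4, 5]})
    df = map_to_binary_answers(df, cols=["suspense"])
    # Ratings below 3 map to -1; ratings of 3 and above map to 1.
    print(df[["suspense", "suspense_bin"]])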
def annotation_correlation(args, annotation_df, genres_per_story_df):
ensure_dir(f"{args['output_dir']}/annotation_correlation/")
ensure_dir(f"{args['output_dir']}/annotation_correlation/multi/")
ensure_dir(f"{args['output_dir']}/annotation_correlation/heatmap/")
ensure_dir(f"{args['output_dir']}/annotation_correlation/scatter/")
ensure_dir(f"{args['output_dir']}/annotation_correlation/agreement/")
for genre in genre_other + ["other", "all"]:
if genre != "all":
genre_rows_df = genres_per_story_df.loc[genres_per_story_df['genre'] == genre]
annotation_genre_filtered_df = pandas.merge(annotation_df, genre_rows_df, left_on=story_id_col,
right_on="story_id", how='inner')
else:
annotation_genre_filtered_df = annotation_df
story_ids = annotation_genre_filtered_df[story_id_col].unique()
story_worker_pairs_dict = {}
for story_id in story_ids:
workers_for_story_df = annotation_genre_filtered_df.loc[
annotation_genre_filtered_df[story_id_col] == story_id]
workers_for_story = workers_for_story_df[worker_id_col].unique()
story_worker_pairs_dict[story_id] = list(itertools.combinations(workers_for_story, 2))
annotator_agreement = []
for col in annotation_columns + [f"{c}_bin" for c in annotation_columns]:
agreement_dict = {"measure": col}
x_list = []
y_list = []
for story_id, pairs in story_worker_pairs_dict.items():
if pairs is None or len(pairs) == 0:
continue
story_df = annotation_genre_filtered_df.loc[annotation_genre_filtered_df[story_id_col] == story_id]
for worker_1, worker_2 in pairs:
worker_1_values = story_df.loc[story_df[worker_id_col] == worker_1][col].values
worker_2_values = story_df.loc[story_df[worker_id_col] == worker_2][col].values
x_list.append(worker_1_values[0])
y_list.append(worker_2_values[0])
phi = matthews_corrcoef(x_list, y_list)
agreement_dict["phi"] = phi
triples = []
for idx, row in annotation_genre_filtered_df.iterrows():
worker = row[worker_id_col]
story = row[story_id_col]
metrics_col = row[col]
triples.append((str(worker), str(story), int(metrics_col)))
if "_bin" in col:
dist = binary_distance
else:
dist = interval_distance
t = AnnotationTask(data=triples, distance=dist)
agreement_dict["alpha"] = t.alpha()
worker_ids = annotation_genre_filtered_df[worker_id_col].unique()
kendall_list = []
pearson_list = []
spearman_list = []
worker_items = []
for worker in worker_ids:
x_list = []
y_list = []
worker_df = annotation_genre_filtered_df.loc[annotation_genre_filtered_df[worker_id_col] == worker]
worker_stories = worker_df[story_id_col].unique()
exclude_df = annotation_genre_filtered_df.loc[annotation_genre_filtered_df[worker_id_col] != worker]
means_df = exclude_df.groupby(story_id_col, as_index=False).mean()
for story in worker_stories:
mean_value = means_df.loc[means_df[story_id_col] == story][col].values
worker_value = worker_df.loc[worker_df[story_id_col] == story][col].values
if len(mean_value) > 0 and len(worker_value) > 0:
if len(worker_value) > 1:
worker_value = worker_value[0]
x_list.append(float(worker_value))
y_list.append(float(mean_value))
if len(x_list) >= 2 and len(y_list) == len(x_list):
kendall, _ = kendalltau(x_list, y_list)
                    if not numpy.isnan(kendall):
#! /usr/bin/env python
"""
Module for generating an RBF approximation
of temporal dynamics in POD basis space
"""
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from numpy.lib.scimath import sqrt as csqrt
from scipy import interpolate
import pod as pod
import greedy as gdy
import rom as rom
import plotting as plo
def compute_rbf(Zsnap_rbf, time_rbf, ep=0.05, beta=2.5, rbf_kernel='matern', msg=False):
"""
Compute the rbf system that includes the rbf interpolation matrix,
the weights for the rbf interpolation, and the optimal scaling factor
Input::
Zsnap_rbf: Dictionary of snapshots containing projected
snapshots of every state variable
time_rbf: Array of time points for the snapshots
    ep: Pre-specified upper bound on the rbf scaling factor; the
        fill-distance based estimate is capped at this value
    beta: Secondary parameter for some IMQ rbf kernels
    Output::
    Zcenter: Matrix of RBF centers assembled from the projected snapshots
    A: RBF interpolation (kernel) matrix evaluated at the centers
    epsilon: Scaling factor actually used, min(ep, fill-distance estimate)
"""
# --- Recompute RBF system with optimized RBF centers
soln_names = Zsnap_rbf.keys()
rij = compute_radial_distances(Zsnap_rbf)
epsilon = np.minimum(ep, estimate_epsilon_fill(rij))
A, Zcenter = compute_interp_matrix(Zsnap_rbf, Zsnap_rbf[list(Zsnap_rbf.keys())[0]].shape[1]-1,
rbf_kernel=rbf_kernel, epsilon=epsilon, beta=beta)
if msg:
print("Epsilon specified = {0}, epsilon computed = {1}".format(
ep, estimate_epsilon_fill(rij)))
print("Epsilon used = {0}".format(epsilon))
print('Condition number of A: {0}'.format(np.linalg.cond(A)))
return Zcenter, A, epsilon
def compute_interp_matrix(Zsnap, Nt, rbf_kernel='matern', epsilon=0.05, beta=2.5):
"""
Build a radial basis function (RBF) interpolant using the entries in Zsnap to form the centers
Zsnap is input on a solution component basis, e.g., Zsnap['h'] is a N_w['h'] x N_snap array of POD modes
for the 'h' variable.
For now assumes Nt is equal to N_snap-1
Input:
:param: Zsnap -- dictionary of modes for all snapshots
:param: Nt -- Total number of snapshots - 1
    :param: rbf_kernel -- kernel to use: 'gaussian', 'inverseMQ', 'matern', 'matern1' or 'MQ'
    :param: epsilon -- rbf scaling factor
    :param: beta -- secondary parameter for the inverse multiquadric kernel
    Returns:
    A -- [Nt,Nt] array containing the rbf kernel evaluations on the Zcenter vectors
    Zcenter -- Composite [nw_total,Nt] array of evaluation points (centers) for the RBF interpolant
"""
component_keys = Zsnap.keys()
# compute centers
nw_sizes = [Zsnap[key].shape[0] for key in component_keys]
nw_total = sum(nw_sizes)
Zcenter = np.zeros((nw_total, Nt), 'd')
offset = 0
for ii, key in enumerate(component_keys):
# evaluation points are (t_0,t_1,...,t_{snap-1})
Zcenter[offset:offset+Zsnap[key].shape[0], :] = Zsnap[key][:, 0:-1]
offset += Zsnap[key].shape[0]
# distances between all of the evaluation points
rij = rbf_norms(Zcenter, Zcenter)
A = compute_kernel(rij, rbf_kernel, epsilon)
return A, Zcenter
def compute_kernel(rij, rbf_kernel='matern', epsilon=0.05, beta=2.5):
"""
Compute Nc x Nc RBF kernel matrix,
A = Phi(r,r) where Nc = # of RBF centers
"""
if rbf_kernel == 'gaussian':
A = rbf_gaussian(rij, epsilon)
elif rbf_kernel == 'inverseMQ':
A = rbf_inverse_multiquadric(rij, epsilon, beta)
elif rbf_kernel == 'matern':
A = rbf_matern(rij, epsilon)
elif rbf_kernel == 'matern1':
A = rbf_matern1(rij, epsilon)
elif rbf_kernel == 'MQ':
A = rbf_multiquadric(rij, epsilon)
return A
def compute_radial_distances(Zsnap):
"""
Routine to compute the distance between data points
"""
component_keys = Zsnap.keys()
nw_sizes = [Zsnap[key].shape[0] for key in component_keys]
nw_total = sum(nw_sizes)
Nt = Zsnap[list(Zsnap.keys())[0]].shape[1]-1
Zcenter = np.zeros((nw_total, Nt), 'd')
offset = 0
for ii, key in enumerate(component_keys):
# evaluation points are (t_0,t_1,...,t_{snap-1})
Zcenter[offset:offset+Zsnap[key].shape[0], :] = Zsnap[key][:, 0:-1]
offset += Zsnap[key].shape[0]
# distances between all of the evaluation points
rij = rbf_norms(Zcenter, Zcenter)
return rij
def build_dFdt_multistep(Z_pod, times_pod, nw, flag=None):
"""
Compute RBF weights for different high order time
discretization methods
Available routines:
1) Explicit midpoint or LeapFrog scheme,
2) 2nd & 3rd order Adams Bashforth
3) Explicit 3rd order Nystrom method
4) 2nd and 3rd order extrapolated BDF methods
======
Input-
Z_pod: dictionary of projected snapshots per component
times_pod: array of normalized time points corresponding to snapshots
nw: dictionary of number of POD modes per component
flag: Denotes the selected time discretization scheme
======
Output-
dZdata: Dictionary of time derivative of modal coefficients,
size = [ nw[key] x Nt_pod-1 ]
"""
soln_names = nw.keys()
dt_pod = times_pod[1:]-times_pod[0:-1]
dZdata = {}
for key in soln_names:
dZdata[key] = np.zeros((nw[key], times_pod.size-1), 'd')
for mode in range(nw[key]):
if flag == 'LF':
dZdata[key][mode, 0] = Z_pod[key][mode, 1]-Z_pod[key][mode, 0]
dZdata[key][mode, 0] /= dt_pod[0]
dZdata[key][mode, 1:] = Z_pod[key][mode, 2:] - \
Z_pod[key][mode, 0:-2]
dZdata[key][mode, 1:] /= (dt_pod[1:]+dt_pod[0:-1])
elif flag == 'AB2': # Adams Bashforth Order 2
dZdata[key][mode, 0] = Z_pod[key][mode, 1]-Z_pod[key][mode, 0]
dZdata[key][mode, 0] /= dt_pod[0]
for inx in range(1, times_pod.size-1):
dZdata[key][mode, inx] = 2. * \
(Z_pod[key][mode, inx+1]-Z_pod[key]
[mode, inx])/(3.*dt_pod[inx])
dZdata[key][mode, inx] += dZdata[key][mode, inx-1]/3.
elif flag == 'AB3': # Adams Bashforth Order 3
dZdata[key][mode, 0] = Z_pod[key][mode, 1]-Z_pod[key][mode, 0]
dZdata[key][mode, 0] /= dt_pod[0]
dZdata[key][mode, 1] = 2. * \
(Z_pod[key][mode, 2]-Z_pod[key][mode, 1])/(3.*dt_pod[1])
dZdata[key][mode, 1] += dZdata[key][mode, 0]/3.
for inx in range(2, times_pod.size-1):
dZdata[key][mode, inx] = 12 * \
(Z_pod[key][mode, inx+1]-Z_pod[key]
[mode, inx])/(23.*dt_pod[inx])
dZdata[key][mode, inx] += 16.*dZdata[key][mode,
inx-1]/23. - 5.*dZdata[key][mode, inx-2]/23.
elif flag == 'NY3': # Explicit Nystrom (k=3)
dZdata[key][mode, 0:2] = Z_pod[key][mode, 1:3] - \
Z_pod[key][mode, 0:2]
dZdata[key][mode, 0:2] /= dt_pod[0:2]
for inx in range(2, times_pod.size-1):
dZdata[key][mode, inx] = 3. * \
(Z_pod[key][mode, inx+1] - Z_pod[key]
[mode, inx-1]) / (7.*dt_pod[inx])
dZdata[key][mode, inx] += (2.*dZdata[key]
[mode, inx-1] - dZdata[key][mode, inx-2])/7.
elif flag == 'BDF-EP2': # Extrapolated BDF order 2
dZdata[key][mode, 0] = Z_pod[key][mode, 1]-Z_pod[key][mode, 0]
dZdata[key][mode, 0] /= dt_pod[0]
for inx in range(1, times_pod.size-1):
                    dZdata[key][mode, inx] = .75*Z_pod[key][mode, inx+1] - Z_pod[key][mode, inx] \
                        + 0.25*Z_pod[key][mode, inx-1]
dZdata[key][mode, inx] /= (dt_pod[inx])
dZdata[key][mode, inx] += 0.5*dZdata[key][mode, inx-1]
elif flag == 'BDF-EP3': # Extrapolated BDF Order 3
dZdata[key][mode, 0:2] = Z_pod[key][mode, 1:3] - \
Z_pod[key][mode, 0:2]
dZdata[key][mode, 0:2] /= dt_pod[0:2]
# dZdata[key][mode,1] = 2.*(Z_pod[key][mode,2]-Z_pod[key][mode,1])/(3.*dt_pod[1]);
# dZdata[key][mode,1] += dZdata[key][mode,0]/3.
for inx in range(2, times_pod.size-1):
dZdata[key][mode, inx] = 11.*Z_pod[key][mode, inx+1]/18. - Z_pod[key][mode, inx] \
+ 0.5*Z_pod[key][mode, inx-1] - \
Z_pod[key][mode, inx-2]/9.
dZdata[key][mode, inx] /= dt_pod[inx]
dZdata[key][mode, inx] += dZdata[key][mode,
inx-1] - dZdata[key][mode, inx-2]/3.
else:
dZdata[key][mode, :] = Z_pod[key][mode, 1:] - \
Z_pod[key][mode, 0:-1]
dZdata[key][mode, :] /= dt_pod
return dZdata
def build_dFdt_weights_multistep(Z_pod, times_pod, nw, A, flag=None):
"""
Compute RBF weights for different high order time
discretization methods
Available routines:
1) Explicit midpoint or LeapFrog scheme,
2) 2nd & 3rd order Adams Bashforth
3) Explicit 3rd order Nystrom method
4) 2nd and 3rd order extrapolated BDF methods
======
Input-
Z_pod: dictionary of projected snapshots per component
times_pod: array of normalized time points corresponding to snapshots
nw: dictionary of number of POD modes per component
A: RBF interpolation matrix
flag: Denotes the selected time discretization scheme
======
Output-
    W_p: dictionary of RBF interpolation coefficients, size = [ nw[key] x Nt_pod-1 ]
    dZdata: dictionary of time derivatives of the modal coefficients, size = [ nw[key] x Nt_pod-1 ]
"""
W_p = {}
soln_names = Z_pod.keys()
dZdata = build_dFdt_multistep(Z_pod, times_pod, nw, flag=flag)
for key in soln_names:
W_p[key] = np.zeros((nw[key], times_pod.size-1), 'd')
for mode in range(nw[key]):
W_p[key][mode, :] = np.linalg.solve(A, dZdata[key][mode, :])
return W_p, dZdata
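
# Hedged end-to-end sketch of how the routines above fit together (illustrative
# only; the snapshot data here is synthetic). The final evaluation step --
# forming the kernel column against the stored centers and multiplying by the
# weights -- reflects the standard RBF interpolant and is an assumption about
# intended usage rather than code taken from this module.
def _example_rbf_rom():
    rng = np.random.default_rng(0)
    times = np.linspace(0.0, 1.0, 21)
    Z_pod = {'h': rng.standard_normal((3, times.size))}    # 3 modes x 21 snapshots
    nw = {'h': 3}
    A, Zcenter = compute_interp_matrix(Z_pod, times.size - 1, rbf_kernel='matern', epsilon=0.05)
    W_p, dZdata = build_dFdt_weights_multistep(Z_pod, times, nw, A, flag='AB2')
    # Approximate dZ/dt at a new reduced state z_new (stacked modes, one column).
    z_new = Z_pod['h'][:, 10:11]
    r_new = rbf_norms(Zcenter, z_new)                        # distances to the stored centers
    phi_new = compute_kernel(r_new, 'matern', epsilon=0.05)  # kernel evaluations, shape (20, 1)
    dzdt_new = W_p['h'] @ phi_new                            # interpolated derivative, shape (3, 1)
    print(dzdt_new.ravel())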
def rbf_multiquadric(r, epsilon=1.0, beta=2.5):
"""
multiquadric
"""
return np.sqrt((epsilon*r)**2 + 1.0)
# return np.sqrt((1.0/epsilon*r)**2 + 1.0)
def rbf_inverse_multiquadric(r, epsilon=1.0, beta=2.5):
"""
inverse multiquadric
"""
return np.power((epsilon*r)**2 + 1, -beta)
# return np.power(1.0 + (1.0/epsilon*r)**2,-beta)
def rbf_gaussian(r, epsilon=1.0, beta=2.5):
"""
gaussian
"""
return np.exp(-(epsilon*r)**2)
def rbf_matern(r, epsilon=1.0, beta=2.5):
"""
matern kernel, order 0
"""
return np.exp(-epsilon*r)
def rbf_matern1(r, epsilon=1.0, beta=2.5):
"""
matern kernel, order 1
"""
return np.exp(-epsilon*r)*(1 + epsilon*r)
def rbf_norms(x1, x2, kernel=None):
"""
Computes the distance matrix for vector arguments x1, x2
x1 : N x M_A matrix, M_A vectors in N-space
x2 : N x M_B matrix, M_B vectors in N-space
If kernel is None, returns euclidean distance,
else returns 1D distance matrix for Exp Sin Sqd kernel
"""
if kernel is None:
return scipy.spatial.distance.cdist(x1.T, x2.T, 'euclidean')
else:
        assert x1.shape[1] == x2.shape[1], 'x1 and x2 dimensions do not match'
        DM = np.empty((x1.shape[0], x2.shape[0]), dtype=np.double)
        for i in np.arange(0, x1.shape[0]):
            for j in np.arange(0, x2.shape[0]):
import numpy as np
import librosa
class Element(object):
def __init__(self,
num_mic=4,
sampling_frequency=16000,
fft_length=512,
fft_shift=256,
sound_speed=343,
theta_step=1,
frame_num=1000000):
self.num_mic = num_mic
self.mic_angle_vector = np.array([45, 315, 225, 135])
# self.mic_angle_vector = np.array([315, 45, 225, 135])
self.mic_diameter = 0.064
self.sampling_frequency = sampling_frequency
self.fft_length = fft_length
self.fft_shift = fft_shift
self.sound_speed = sound_speed
self.theta_step = theta_step
self.frame_num = frame_num
def get_sterring_vector(self, look_direction):
'''
return: sv of shape (N//2+1,num_mic)
'''
        frequency_vector = librosa.fft_frequencies(sr=self.sampling_frequency, n_fft=self.fft_length)
        steering_vector = np.exp(1j * 2 * np.pi / self.sound_speed * self.mic_diameter / 2 *
                                 np.einsum("i,j->ij", frequency_vector,
                                           np.cos(np.deg2rad(look_direction) - np.deg2rad(self.mic_angle_vector))))
        return steering_vector
import unittest
from pythran.tests import TestEnv
import numpy
import tempfile
import os
from pythran.typing import NDArray, List, Tuple
@TestEnv.module
class TestNumpyFunc0(TestEnv):
def test_extended_sum0(self):
self.run_test("def numpy_extended_sum0(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum0=[NDArray[int,:,:,:,:]])
def test_extended_sum1(self):
self.run_test("def numpy_extended_sum1(a): import numpy ; return numpy.sum(a[1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum1=[NDArray[int,:,:,:,:]])
def test_extended_sum2(self):
self.run_test("def numpy_extended_sum2(a): import numpy ; return numpy.sum(a[1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum2=[NDArray[int,:,:,:,:]])
def test_extended_sum3(self):
self.run_test("def numpy_extended_sum3(a): import numpy ; return numpy.sum(a[1:-1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum3=[NDArray[int,:,:,:,:]])
def test_extended_sum4(self):
self.run_test("def numpy_extended_sum4(a): import numpy ; return numpy.sum(a[1:-1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum4=[NDArray[int,:,:,:,:]])
def test_extended_sum5(self):
self.run_test("def numpy_extended_sum5(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum5=[NDArray[int,:,:,:,:]])
def test_out_sum0(self):
self.run_test("def numpy_out_sum0(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(2, dtype=int),
numpy_out_sum0=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum1(self):
self.run_test("def numpy_out_sum1(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.ones(2, dtype=int),
numpy_out_sum1=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum2(self):
self.run_test("def numpy_out_sum2(a, b): import numpy ; return numpy.sum(a, axis=1, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(5, dtype=int),
numpy_out_sum2=[NDArray[int,:,:], NDArray[int,:]])
def test_numpy_shape_as_function(self):
self.run_test("def numpy_shape_as_function(a): import numpy ; return numpy.shape(a)",
numpy.ones(3, numpy.int16),
numpy_shape_as_function=[NDArray[numpy.int16,:]])
def test_numpy_size_as_function(self):
self.run_test("def numpy_size_as_function(a): import numpy ; return numpy.size(a)",
numpy.ones(3, numpy.int16),
numpy_size_as_function=[NDArray[numpy.int16,:]])
def test_numpy_ndim_as_function(self):
self.run_test("def numpy_ndim_as_function(a): import numpy ; return numpy.ndim(a)",
numpy.ones(3, numpy.int16),
numpy_ndim_as_function=[NDArray[numpy.int16,:]])
def test_frexp0(self):
self.run_test("def np_frexp0(a): import numpy as np ; return np.frexp(a)", 1.5, np_frexp0=[float])
def test_frexp1(self):
self.run_test("def np_frexp1(a): import numpy as np ; return np.frexp(a)", numpy.array([1.1,2.2,3.3]), np_frexp1=[NDArray[float,:]])
def test_frexp2(self):
self.run_test("def np_frexp2(a): import numpy as np ; return np.frexp(a+a)", numpy.array([1.1,2.2,3.3]), np_frexp2=[NDArray[float,:]])
def test_ndindex0(self):
self.run_test("def np_ndindex0(): import numpy as np ; return [x for x in np.ndindex(5,6)]",
np_ndindex0=[])
def test_ndindex1(self):
self.run_test("def np_ndindex1(a): import numpy as np ; return [x for x in np.ndindex(a)]", 3, np_ndindex1=[int])
def test_ndindex2(self):
self.run_test("def np_ndindex2(n): import numpy as np ; return [x for x in np.ndindex((n,n))]", 3, np_ndindex2=[int])
def test_ndenumerate0(self):
self.run_test("def np_ndenumerate0(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([[1, 2], [3, 4]]), np_ndenumerate0=[NDArray[int,:,:]])
def test_ndenumerate1(self):
self.run_test("def np_ndenumerate1(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([1, 2, 3, 4]), np_ndenumerate1=[NDArray[int,:]])
def test_nansum0(self):
self.run_test("def np_nansum0(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nansum0=[NDArray[float,:,:]])
def test_nansum1(self):
self.run_test("def np_nansum1(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nansum1=[NDArray[float,:,:]])
def test_nansum2(self):
self.run_test("def np_nansum2(a): import numpy as np ; return np.nansum(a)", [1., numpy.nan], np_nansum2=[List[float]])
def test_nanmin0(self):
self.run_test("def np_nanmin0(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmin0=[NDArray[float,:,:]])
def test_nanmin1(self):
self.run_test("def np_nanmin1(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nanmin1=[NDArray[float,:,:]])
def test_nanmax0(self):
self.run_test("def np_nanmax0(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmax0=[NDArray[float,:,:]])
def test_nanmax1(self):
self.run_test("def np_nanmax1(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [numpy.inf, numpy.nan]]) , np_nanmax1=[NDArray[float,:,:]])
def test_np_residual(self):
self.run_test("""import numpy as np
def np_residual():
nx, ny, nz= 75, 75, 100
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
P = np.ones((nx, ny, nz), np.float64)
d2x = np.zeros_like(P)
d2y = np.zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*np.cosh(P).mean()**2
""", np_residual=[])
def test_np_func2(self):
self.run_test("""import numpy as np
def np_func2(x):
f = [x[0] * np.cos(x[1]) - 4,
x[1]*x[0] - x[1] - 5]
df = np.array([[np.cos(x[1]), -x[0] * np.sin(x[1])],
[x[1], x[0] - 1]])
return f, df
""", [1.0, 2.0, 3.0], np_func2=[List[float]])
def test_np_peval(self):
self.run_test("""import numpy
def np_peval(x, p):
return p[0]*numpy.sin(2*numpy.pi*p[1]*x+p[2])
""", 12., [1.0, 2.0, 3.0], np_peval=[float, List[float]])
def test_np_residuals(self):
self.run_test("""import numpy
def np_residuals():
x = numpy.arange(0,6e-2,6e-2/30)
A,k,theta = 10, 1.0/3e-2, numpy.pi/6
return A*numpy.sin(2*numpy.pi*k*x+theta)
""", np_residuals=[])
def test_np_func_deriv(self):
self.run_test("""import numpy
def np_func_deriv(x, sign=1.0):
dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
dfdx1 = sign*(2*x[0] - 4*x[1])
return numpy.array([ dfdx0, dfdx1 ])
""", [-1.0, 1.0], -1.0, np_func_deriv=[List[float], float])
def test_np_func(self):
self.run_test("""import numpy
def np_func(x, sign=1.0):
return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
""", [-1.0, 1.0], -1.0, np_func=[List[float], float])
def test_rosen_hess_p(self):
self.run_test("""import numpy
def np_rosen_hess_p(x, p):
x = numpy.asarray(x)
Hp = numpy.zeros_like(x)
Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1]
Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \
-400*x[1:-1]*p[2:]
Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1]
return Hp
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
numpy.array([2.3, 1.7, 1.8, 2.9, 2.2]),
np_rosen_hess_p=[NDArray[float,:], NDArray[float,:]])
def test_rosen_hess(self):
self.run_test("""import numpy
def np_rosen_hess(x):
x = numpy.asarray(x)
H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1)
diagonal = numpy.zeros_like(x)
diagonal[0] = 1200*x[0]**2-400*x[1]+2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:]
H = H + numpy.diag(diagonal)
return H
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_hess=[NDArray[float,:]])
def test_rosen_der(self):
self.run_test("""import numpy
def np_rosen_der(x):
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm)
der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0])
der[-1] = 200*(x[-1]-x[-2]**2)
return der
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_der=[NDArray[float,:]])
def test_rosen(self):
self.run_test("import numpy\ndef np_rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen=[NDArray[float,:]])
def test_nanargmax0(self):
self.run_test("def np_nanargmax0(a): from numpy import nanargmax; return nanargmax(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmax0=[NDArray[float,:,:]])
def test_nanargmin0(self):
self.run_test("def np_nanargmin0(a): from numpy import nanargmin ; return nanargmin(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmin0=[NDArray[float,:,:]])
def test_nan_to_num0(self):
self.run_test("def np_nan_to_num0(a): import numpy as np ; return np.nan_to_num(a)", numpy.array([numpy.inf, -numpy.inf, numpy.nan, -128, 128]), np_nan_to_num0=[NDArray[float,:]])
def test_median0(self):
self.run_test("def np_median0(a): from numpy import median ; return median(a)", numpy.array([[1, 2], [3, 4]]), np_median0=[NDArray[int,:,:]])
def test_median1(self):
self.run_test("def np_median1(a): from numpy import median ; return median(a)", numpy.array([1, 2, 3, 4,5]), np_median1=[NDArray[int,:]])
def test_median2(self):
self.run_test("def np_median2(a): from numpy import median ; return median(a, None)", numpy.array([1, 2, 3, 4,5]), np_median2=[NDArray[int,:]])
def test_median3(self):
self.run_test("def np_median3(a): from numpy import median ; return median(a, 0)", numpy.array([[1, 2, 3], [4,5,6]]), np_median3=[NDArray[int,:,:]])
def test_median4(self):
self.run_test("def np_median4(a): from numpy import median ; return median(a, 1)", numpy.array([[1, 2, 3], [4,5,6]]), np_median4=[NDArray[int,:,:]])
def test_median5(self):
self.run_test("def np_median5(a): from numpy import median ; return median(a, -1)", numpy.array([[[1], [2], [3]], [[4],[5],[6]]]), np_median5=[NDArray[int,:,:,:]])
def test_median6(self):
self.run_test("def np_median6(l): from numpy import median ; return l + median(l)", numpy.array([3, 1]), np_median6=[NDArray[int, :]])
def test_mean0(self):
self.run_test("def np_mean0(a): from numpy import mean ; return mean(a)", numpy.array([[1, 2], [3, 4]]), np_mean0=[NDArray[int,:,:]])
def test_mean1(self):
self.run_test("def np_mean1(a): from numpy import mean ; return mean(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_mean1=[NDArray[float,:,:]])
def test_mean2(self):
self.run_test("def np_mean2(a): from numpy import mean ; return mean(a)", numpy.array([[[1, 2], [3, 4.]]]), np_mean2=[NDArray[float,:,:,:]])
def test_mean3(self):
self.run_test("def np_mean3(a): from numpy import mean ; return mean(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_mean3=[NDArray[float,:,:,:]])
def test_mean4(self):
self.run_test("def np_mean4(a): from numpy import mean ; return mean(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_mean4=[NDArray[float,:,:,:]])
def test_mean5(self):
self.run_test("def np_mean5(a): from numpy import mean ; return mean(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_mean5=[NDArray[float,:,:,:]])
def test_var0(self):
self.run_test("def np_var0(a): return a.var()", numpy.array([[1, 2], [3, 4]], dtype=float), np_var0=[NDArray[float,:,:]])
def test_var1(self):
self.run_test("def np_var1(a): from numpy import var ; return var(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_var1=[NDArray[float,:,:]])
def test_var2(self):
self.run_test("def np_var2(a): from numpy import var ; return var(a)", numpy.array([[[1, 2], [3, 4.]]]), np_var2=[NDArray[float,:,:,:]])
def test_var3(self):
self.run_test("def np_var3(a): from numpy import var ; return var(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_var3=[NDArray[float,:,:,:]])
def test_var4(self):
self.run_test("def np_var4(a): from numpy import var ; return var(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_var4=[NDArray[float,:,:,:]])
def test_var5(self):
self.run_test("def np_var5(a): from numpy import var ; return var(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var5=[NDArray[float,:,:,:]])
def test_var6(self):
self.run_test("def np_var6(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4.]]]), np_var6=[NDArray[float,:,:,:]])
def test_var7(self):
self.run_test("def np_var7(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var7=[NDArray[float,:,:,:]])
def test_var8(self):
self.run_test("def np_var8(a): from numpy import var ; return var(1j * a, 2)", numpy.array([[[1, 2], [3, 4]]]), np_var8=[NDArray[int,:,:,:]])
def test_var9(self):
self.run_test("def np_var9(a): from numpy import var ; return var(1j * a)", numpy.array([[[1, 2], [3, 4]]]), np_var9=[NDArray[int,:,:,:]])
def test_std0(self):
self.run_test("def np_std0(a): from numpy import std ; return std(a)", numpy.array([[[1, 2], [3, 4]]]), np_std0=[NDArray[int, :, :, :]])
def test_std1(self):
self.run_test("def np_std1(a): from numpy import std ; return std(a, 0)", numpy.array([[[1, 2], [3, 4]]]), np_std1=[NDArray[int, :, :, :]])
def test_std2(self):
self.run_test("def np_std2(a): from numpy import std ; return std(a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std2=[NDArray[int, :, :, :]])
def test_std3(self):
self.run_test("def np_std3(a): from numpy import std ; return std(1j*a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std3=[NDArray[int, :, :, :]])
def test_logspace0(self):
self.run_test("def np_logspace0(start, stop): from numpy import logspace ; start, stop = 3., 4. ; return logspace(start, stop, 4)", 3., 4., np_logspace0=[float, float])
def test_logspace1(self):
self.run_test("def np_logspace1(start, stop): from numpy import logspace ; return logspace(start, stop, 4, False)", 3., 4., np_logspace1=[float, float])
def test_logspace2(self):
self.run_test("def np_logspace2(start, stop): from numpy import logspace ; return logspace(start, stop, 4, True, 2.0)", 3., 4., np_logspace2=[float, float])
def test_lexsort0(self):
self.run_test("def np_lexsort0(surnames): from numpy import lexsort ; first_names = ('Heinrich', 'Galileo', 'Gustav') ; return lexsort((first_names, surnames))", ('Hertz', 'Galilei', 'Hertz'), np_lexsort0=[Tuple[str, str, str]])
def test_lexsort1(self):
self.run_test("def np_lexsort1(a): from numpy import lexsort ; b = [1,5,1,4,3,4,4] ; return lexsort((a,b))", [9,4,0,4,0,2,1], np_lexsort1=[List[int]])
def test_lexsort2(self):
self.run_test("def np_lexsort2(a): from numpy import lexsort ; return lexsort((a+1,a-1))", numpy.array([1,5,1,4,3,4,4]), np_lexsort2=[NDArray[int,:]])
def test_issctype0(self):
self.run_test("def np_issctype0(): from numpy import issctype, int32 ; a = int32 ; return issctype(a)", np_issctype0=[])
def test_issctype1(self):
self.run_test("def np_issctype1(): from numpy import issctype ; a = list ; return issctype(a)", np_issctype1=[])
def test_issctype2(self):
self.run_test("def np_issctype2(a): from numpy import issctype ; return issctype(a)", 3.1, np_issctype2=[float])
def test_isscalar0(self):
self.run_test("def np_isscalar0(a): from numpy import isscalar ; return isscalar(a)", 3.1, np_isscalar0=[float])
def test_isscalar1(self):
self.run_test("def np_isscalar1(a): from numpy import isscalar ; return isscalar(a)", [3.1], np_isscalar1=[List[float]])
def test_isscalar2(self):
self.run_test("def np_isscalar2(a): from numpy import isscalar ; return isscalar(a)", '3.1', np_isscalar2=[str])
def test_isrealobj0(self):
self.run_test("def np_isrealobj0(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.]), np_isrealobj0=[NDArray[float,:]])
def test_isrealobj1(self):
self.run_test("def np_isrealobj1(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isrealobj1=[NDArray[complex,:,:]])
def test_isreal0(self):
self.run_test("def np_isreal0(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.]), np_isreal0=[NDArray[float,:]])
def test_isreal1(self):
self.run_test("def np_isreal1(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isreal1=[NDArray[complex,:,:]])
def test_iscomplex0(self):
self.run_test("def np_iscomplex0(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1, 2, 3.]), np_iscomplex0=[NDArray[float,:]])
def test_iscomplex1(self):
self.run_test("def np_iscomplex1(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_iscomplex1=[NDArray[complex,:,:]])
def test_intersect1d0(self):
self.run_test("def np_intersect1d0(a): from numpy import intersect1d ; b = [3, 1, 2, 1] ; return intersect1d(a,b)", [1, 3, 4, 3], np_intersect1d0=[List[int]])
def test_insert0(self):
self.run_test("def np_insert0(a): from numpy import insert ; return insert(a, 1, 5)", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert0=[NDArray[int,:,:]])
def test_insert1(self):
self.run_test("def np_insert1(a): from numpy import insert ; return insert(a, [1,2], [5,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert1=[NDArray[int,:,:]])
def test_insert2(self):
self.run_test("def np_insert2(a): from numpy import insert ; return insert(a, [1,1], [5.2,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert2=[NDArray[int,:,:]])
def test_inner0(self):
self.run_test("def np_inner0(x): from numpy import inner ; y = 3 ; return inner(x,y)", 2, np_inner0=[int])
def test_inner1(self):
self.run_test("def np_inner1(x): from numpy import inner ; y = [2, 3] ; return inner(x,y)", [2, 3], np_inner1=[List[int]])
def test_indices0(self):
self.run_test("def np_indices0(s): from numpy import indices ; return indices(s)", (2, 3), np_indices0=[Tuple[int, int]])
def test_identity0(self):
self.run_test("def np_identity0(a): from numpy import identity ; return identity(a)", 3, np_identity0=[int])
def test_identity1(self):
self.run_test("def np_identity1(a): from numpy import identity ;return identity(a)", 4, np_identity1=[int])
def test_tofile0(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8)
try:
self.run_test("def np_tofile0(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile0=[NDArray[numpy.uint8,:], str])
finally:
os.remove(temp_name)
def test_tofile1(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16)
try:
self.run_test("def np_tofile1(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile1=[NDArray[numpy.uint16,:], str])
finally:
os.remove(temp_name)
def test_tofile2(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**32,1000).astype(numpy.uint32)
try:
self.run_test("def np_tofile2(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile2=[NDArray[numpy.uint32,:], str])
finally:
os.remove(temp_name)
def test_tofile3(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float32)
try:
self.run_test("def np_tofile3(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile3=[NDArray[numpy.float32,:], str])
finally:
os.remove(temp_name)
def test_tofile4(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.random(1000).astype(numpy.float64)
try:
self.run_test("def np_tofile4(x,file): import numpy ; x.tofile(file); return numpy.fromfile(file)", x, temp_name, np_tofile4=[NDArray[numpy.float64,:], str])
finally:
os.remove(temp_name)
def test_fromfile0(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**8,1000).astype(numpy.uint8)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile0(file): from numpy import fromfile, uint8 ; return fromfile(file, uint8)", temp_name, np_fromfile0=[str])
finally:
os.remove(temp_name)
def test_fromfile1(self):
temp_name = tempfile.mkstemp()[1]
x = numpy.random.randint(0,2**16,1000).astype(numpy.uint16)
x.tofile(temp_name)
try:
self.run_test("def np_fromfile1(file): from numpy import fromfile, uint16 ; return fromfile(file, uint16)", temp_name, np_fromfile1=[str])
finally:
os.remove(temp_name)
def test_fromfile2(self):
temp_name = tempfile.mkstemp()[1]
        x = numpy.random.randint(0,2**32,1000).astype(numpy.uint32)
        x.tofile(temp_name)
        try:
            self.run_test("def np_fromfile2(file): from numpy import fromfile, uint32 ; return fromfile(file, uint32)", temp_name, np_fromfile2=[str])
        finally:
            os.remove(temp_name)
from .particle import Particle
import numpy as np
from scipy import constants
# Globals
epsilon_0 = constants.epsilon_0
pi = constants.pi
class Charge(Particle):
"""Base class for a point electric charge
Attributes:
position: particle position, 1D numpy array of length 3
charge: electric charge in Coulomb.
Methods:
getPosition(): Returns numpy array
"""
def __init__(
self,
position,
charge,
velocity=[0, 0, 0],
acceleration=[0, 0, 0],
mass=np.inf,
):
"""Charge class initializer
Args:
position: position. units: meters. numpy array or a list.
charge: electric charge. units: Coulombs. float.
"""
Particle.__init__(self, position, velocity, acceleration, mass)
self.charge = charge
@property
def q(self):
"""Electric charge value in Coulomb"""
return self.charge
def field(self, fpos, type="analytical", h=0.001, component=None):
"""Electric field at a given position.
Args:
fpos: field position. numpy array or a list.
type: type of field calculation. 'analytical' (default) or from
gradient of potential.
h: potential gradient spatial difference.
component: 'x', 'y', 'z', or None (default)
"""
fpos = np.asarray(fpos)
if not self.__verify3D__(fpos):
            raise TypeError(
                "Field position argument must be a "
                "1D numpy array or list of length 3"
            )
if np.array_equal(fpos, self.position):
electric_field = fpos.astype(float)
electric_field.fill(np.nan)
return electric_field
if type == "analytical":
displacement = fpos - self.position
electric_field = (
self.q
* (4 * pi * epsilon_0) ** -1
* displacement
* np.linalg.norm(displacement) ** -3
)
if type == "potential":
potential_grid = np.empty([3, 3, 3], dtype=object)
            x = np.linspace(fpos[0] - h, fpos[0] + h, 3)
import numpy as np
import logging
import itertools
import operator
from gelcoverage.tools.bed_reader import BedReader
from gelcoverage.tools.bigwig_reader import BigWigReader
import gelcoverage.constants as constants
def find_gaps(coverages, start_position, coverage_threshold):
"""
Find continuous genomic positions under a given threshold coverage_threshold.
:param coverages: list of depth of coverage values
:param start_position: starting position of the coverages sequence
:param coverage_threshold: the coverage threshold to determine gaps
:return: the gaps start and end genomic coordinates in JSON-friendly format.
Chromosome is not set as this information
will be embedded within an exon-transcript-gene where the chromosome is available.
"""
end = start_position + len(coverages)
open_gap = False
current_gap = {}
gaps = []
# Iterates through every coverage position
for idx, value in enumerate(coverages):
if value < coverage_threshold and not open_gap:
open_gap = True
current_gap[constants.GAP_START] = start_position + idx
elif value >= coverage_threshold and open_gap:
open_gap = False
current_gap[constants.GAP_END] = start_position + idx - 1
current_gap[constants.GAP_LENGTH] = current_gap[constants.GAP_END] - current_gap[constants.GAP_START] + 1
gaps.append(current_gap)
current_gap = {}
# Closes the last gap when it extends until the last position
if open_gap:
current_gap[constants.GAP_END] = end
current_gap[constants.GAP_LENGTH] = current_gap[constants.GAP_END] - current_gap[constants.GAP_START] + 1
gaps.append(current_gap)
return gaps
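
# Minimal illustration of find_gaps on a toy coverage track (not part of the
# original module). With a 15x threshold this reports two gaps: positions
# 102-104 and position 106.
def _example_find_gaps():
    coverages = [20, 18, 5, 0, 2, 25, 9, 30]
    gaps = find_gaps(coverages, start_position=100, coverage_threshold=15)
    for gap in gaps:
        print(gap[constants.GAP_START], gap[constants.GAP_END], gap[constants.GAP_LENGTH])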
def compute_exon_level_statistics(coverages, gc_content):
"""
Computes coverage and GC content statistics
:param coverages: list of depth of coverage values
:param gc_content: the GC content for this sequence precomputed
:return: the coverage and GC content exon statistics in JSON-friendly format
"""
stats = {
constants.BASES: len(coverages) if coverages else 0,
constants.AVERAGE: round(float(np.mean(coverages)), 3) if coverages else 0.0,
constants.MEDIAN: round(float(np.median(coverages)), 3) if coverages else 0.0,
constants.PERCENTILE75: round(float(np.percentile(coverages, 75)), 3) if coverages else 0.0,
constants.PERCENTILE25: round(float(np.percentile(coverages, 25)), 3) if coverages else 0.0,
constants.SD: round(float(np.std(coverages)), 3) if coverages else 0.0,
constants.BASES_LT15X: int(np.sum(1 for x in coverages if x < 15)) if coverages else 0,
constants.BASES_GTE15X: int(np.sum(1 for x in coverages if x >= 15)) if coverages else 0,
constants.BASES_GTE30X: int(np.sum(1 for x in coverages if x >= 30)) if coverages else 0,
        constants.BASES_GTE50X: int(np.sum(1 for x in coverages if x >= 50)) if coverages else 0
}
stats[constants.LT15X] = round(float(stats[constants.BASES_LT15X]) / stats[constants.BASES], 5) \
if stats[constants.BASES] > 0 else 0.0
stats[constants.GTE15X] = round(float(stats[constants.BASES_GTE15X]) / stats[constants.BASES], 5) \
if stats[constants.BASES] > 0 else 0.0
stats[constants.GTE30X] = round(float(stats[constants.BASES_GTE30X]) / stats[constants.BASES], 5) \
if stats[constants.BASES] > 0 else 0.0
stats[constants.GTE50X] = round(float(stats[constants.BASES_GTE50X]) / stats[constants.BASES], 5) \
if stats[constants.BASES] > 0 else 0.0
if gc_content is not None: # GC content is not provided for padded exons
stats[constants.GC_CONTENT] = gc_content
return stats
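
# Small illustration of the per-exon statistics computed above (not part of the
# original module): ten toy coverage values and an assumed GC content of 0.4.
def _example_exon_stats():
    stats = compute_exon_level_statistics([10, 12, 14, 16, 20, 25, 30, 35, 40, 50], 0.4)
    print(stats[constants.AVERAGE], stats[constants.GTE15X], stats[constants.GC_CONTENT])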
def compute_transcript_level_statistics(exons):
"""
Computes coverage and GC content statistics at gene level by aggregating the statistics at exon level.
Median and percentiles are estimated by weighting the per-exon metric by the number of bases.
:param exons: list of exon coverage and GC content statistics
:return: the coverage and GC content gene statistics in JSON-friendly format
"""
exons_stats = [x[constants.STATISTICS] for x in exons]
total_bases = int(np.sum([x[constants.BASES] for x in exons_stats])) if exons_stats else 0
bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in exons_stats])) if exons_stats else 0
bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in exons_stats])) if exons_stats else 0
bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in exons_stats])) if exons_stats else 0
bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in exons_stats])) if exons_stats else 0
stats = {
constants.BASES: total_bases,
constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in exons_stats])), 3)
if exons_stats else 0.0,
constants.MEDIAN: round(float(np.sum(
[x[constants.MEDIAN] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else float(0.0),
constants.PERCENTILE25: round(float(np.sum(
[x[constants.PERCENTILE25] * x[constants.BASES] for x in exons_stats])) / total_bases, 3)
if exons_stats else 0.0,
constants.PERCENTILE75: round(float(np.sum(
[x[constants.PERCENTILE75] * x[constants.BASES] for x in exons_stats])) / total_bases, 3)
if exons_stats else 0.0,
constants.SD: round(float(np.sum(
[x[constants.SD] * x[constants.BASES] for x in exons_stats])) / total_bases, 3) if exons_stats else 0.0,
constants.LT15X: round(float(bases_lt_15x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE15X: round(float(bases_gte_15x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE30X: round(float(bases_gte_30x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE50X: round(float(bases_gte_50x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.BASES_LT15X: bases_lt_15x,
constants.BASES_GTE15X: bases_gte_15x,
constants.BASES_GTE30X: bases_gte_30x,
constants.BASES_GTE50X: bases_gte_50x
}
try:
stats[constants.GC_CONTENT] = round(float(np.sum(
[x[constants.GC_CONTENT] * x[constants.BASES] for x in exons_stats]) / total_bases), 5) \
if exons_stats and total_bases > 0 else 0.0
except KeyError:
# There is no GC content data to show (e.g.: the union transcript)
pass
return stats
def compute_coding_region_statistics(genes):
"""
:param genes:
:return:
"""
logging.info("Computing coding region statistics...")
results = {
constants.STATISTICS: None,
constants.CHROMOSOMES: []
}
# Avoids failing when no genes have been reported (might be related with wrong BAM and/or gene list)
if len(genes) == 0:
return results
# Compute the stats aggregated for union transcript
genes_stats = [x[constants.UNION_TRANSCRIPT][constants.STATISTICS] for x in genes]
total_bases = int(np.sum([x[constants.BASES] for x in genes_stats])) if genes_stats else 0
bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in genes_stats])) if genes_stats else 0
bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in genes_stats])) if genes_stats else 0
bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in genes_stats])) if genes_stats else 0
bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in genes_stats])) if genes_stats else 0
results[constants.STATISTICS] = {
constants.BASES: total_bases,
constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in genes_stats])), 3) if genes_stats else 0.0,
constants.MEDIAN: round(float(np.sum(
[x[constants.MEDIAN] * x[constants.BASES] for x in genes_stats]) / total_bases), 3)
if genes_stats and total_bases > 0 else 0.0,
constants.PERCENTILE75: round(float(np.sum(
[x[constants.PERCENTILE75] * x[constants.BASES] for x in genes_stats]) / total_bases), 3)
if genes_stats and total_bases > 0 else 0.0,
constants.PERCENTILE25: round(float(np.sum(
[x[constants.PERCENTILE25] * x[constants.BASES] for x in genes_stats]) / total_bases), 3)
if genes_stats and total_bases > 0 else 0.0,
constants.SD: round(float(np.sum(
[x[constants.SD] * x[constants.BASES] for x in genes_stats]) / total_bases), 3)
if genes_stats and total_bases > 0 else 0.0,
constants.LT15X: round(float(bases_lt_15x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE15X: round(float(bases_gte_15x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE30X: round(float(bases_gte_30x) / total_bases, 5) if total_bases > 0 else 0.0,
constants.GTE50X: round(float(bases_gte_50x) / total_bases, 5) if total_bases > 0 else 0.0
}
# Compute the stats disaggregated by chromosome
chr2stats = [(x[constants.CHROMOSOME], x[constants.UNION_TRANSCRIPT][constants.STATISTICS]) for x in genes]
def groupby_chromosome(list_of_tuples):
it = itertools.groupby(list_of_tuples, operator.itemgetter(0))
for _chromosome, subiter in it:
yield _chromosome, [item[1] for item in subiter]
# Aggregates stats for all chromosomes
chromosome_stats = dict(groupby_chromosome(chr2stats))
autosomes_stats = []
for chromosome, chr_stats in chromosome_stats.iteritems():
chr_total_bases = int(np.sum([x[constants.BASES] for x in chr_stats])) if chr_stats else 0
chr_bases_lt_15x = int(np.sum([x[constants.BASES_LT15X] for x in chr_stats])) if chr_stats else 0
chr_bases_gte_15x = int(np.sum([x[constants.BASES_GTE15X] for x in chr_stats])) if chr_stats else 0
chr_bases_gte_30x = int(np.sum([x[constants.BASES_GTE30X] for x in chr_stats])) if chr_stats else 0
chr_bases_gte_50x = int(np.sum([x[constants.BASES_GTE50X] for x in chr_stats])) if chr_stats else 0
formatted_chr_stats = {
constants.CHROMOSOME: chromosome,
constants.BASES: chr_total_bases,
constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in chr_stats])), 3)
if chr_stats else 0.0,
constants.MEDIAN: round(float(np.sum(
[x[constants.MEDIAN] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3)
if chr_stats and chr_total_bases > 0 else 0.0,
constants.PERCENTILE75: round(float(np.sum(
[x[constants.PERCENTILE75] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3)
if chr_stats and chr_total_bases > 0 else 0.0,
constants.PERCENTILE25: round(float(np.sum(
[x[constants.PERCENTILE25] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3)
if chr_stats and chr_total_bases > 0 else 0.0,
constants.SD: round(float(np.sum(
[x[constants.SD] * x[constants.BASES] for x in chr_stats]) / chr_total_bases), 3)
if chr_stats and chr_total_bases > 0 else 0.0,
constants.LT15X: round(float(chr_bases_lt_15x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0,
constants.GTE15X: round(float(chr_bases_gte_15x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0,
constants.GTE30X: round(float(chr_bases_gte_30x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0,
constants.GTE50X: round(float(chr_bases_gte_50x) / chr_total_bases, 5) if chr_total_bases > 0 else 0.0
}
results[constants.CHROMOSOMES].append(formatted_chr_stats)
logging.info("Coding region statistics for chromosome %s computed!" % chromosome)
# Records stats for autosome
if chromosome in constants.AUTOSOME_IDS:
autosomes_stats.append(formatted_chr_stats)
# Aggregates stats for autosomes
autosomes_total_bases = int(np.sum([x[constants.BASES] for x in autosomes_stats])) if autosomes_stats else 0
autosomes_chr_stats = {
constants.CHROMOSOME: constants.AUTOSOMES,
constants.BASES: autosomes_total_bases,
constants.AVERAGE: round(float(np.mean([x[constants.AVERAGE] for x in autosomes_stats])), 3)
if autosomes_stats else 0.0,
constants.MEDIAN: round(float(np.sum(
[x[constants.MEDIAN] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.PERCENTILE75: round(float(np.sum(
[x[constants.PERCENTILE75] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.PERCENTILE25: round(float(np.sum(
[x[constants.PERCENTILE25] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.SD: round(float(np.sum(
[x[constants.SD] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 3)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.LT15X: round(float(np.sum(
[x[constants.LT15X] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 5)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.GTE15X: round(float(np.sum(
[x[constants.GTE15X] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 5)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.GTE30X: round(float(np.sum(
[x[constants.GTE30X] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 5)
if autosomes_stats and autosomes_total_bases > 0 else 0.0,
constants.GTE50X: round(float(np.sum(
[x[constants.GTE50X] * x[constants.BASES] for x in autosomes_stats]) / autosomes_total_bases), 5)
if autosomes_stats and autosomes_total_bases > 0 else 0.0
}
results[constants.CHROMOSOMES].append(autosomes_chr_stats)
logging.info("Coding region statistics computed!")
return results
def compute_whole_genome_statistics(bigwig_reader, bed_reader=None, chunk_size=100000):
"""
Iterates through the whole genome in a sliding window to obtain some metrics
:type chunk_size: int
:type bigwig_reader: BigWigReader
:type bed_reader: BedReader
:param bigwig_reader:
:return:
"""
logging.info("Computing whole genome statistics...")
results = {
constants.STATISTICS: None,
constants.CHROMOSOMES: []
}
if bed_reader.is_null_bed:
logging.info("Running on all chromosomes defined in the bigwig.")
analysis_regions = bigwig_reader.get_chromosome_lengths()
else:
logging.info("Running on the regions provided in a bed file in --wg-region.")
analysis_regions = bed_reader.get_regions_dictionary()
# Iterates each chromosome
chr_stats = {}
autosomes_stats = []
for chromosome, regions in analysis_regions.iteritems():
chr_stats[chromosome] = {
constants.RMSD: [],
constants.AVERAGE: [],
constants.BASES: [],
constants.BASES_LT15X: [],
constants.BASES_GTE15X: [],
constants.BASES_GTE30X: [],
constants.BASES_GTE50X: [],
constants.MEDIAN: [],
constants.PERCENTILE25: [],
constants.PERCENTILE75: [],
constants.SD: []
}
# Iterates intervals in chunks of fixed size and stores the stats for each chunk
for (start, end) in regions:
logging.debug("Analysing region %s:%s-%s" % (chromosome, start, end))
current_start = start
current_end = min(current_start + chunk_size, end)
while current_start < current_end:
logging.debug("Analysing chunk %s:%s-%s" % (chromosome,
current_start,
current_end))
coverages = bigwig_reader.read_bigwig_coverages(chromosome, current_start, current_end, strict=True)
length = current_end - current_start
chunk_mean = np.mean(coverages)
if chunk_mean == 0:
# As coverage values are positive values we infer that all values are zero
# this may speed up things for missing long regions in the bigwig file, if any
chunk_rmsd = 0
else:
# Gets the squared root sum of squares of the deviation from the mean
                    chunk_rmsd = np.sqrt((np.sum([(x - chunk_mean) ** 2 for x in coverages])) / length)
import math
import numpy as np
import spacy
from gensim import models
from gensim.corpora import Dictionary
from gensim.matutils import sparse2full
from gensim.models.tfidfmodel import TfidfModel
# Text2Vec Class
class Text2Vec:
def __init__(self, doc_list):
# Initialize
self.doc_list = doc_list
self.nlp, self.docs, self.docs_dict = self._preprocess(self.doc_list)
# Functions to lemmatise docs
def _keep_token(self, t):
return (t.is_alpha and
not (t.is_space or t.is_punct or
t.is_stop or t.like_num))
def _lemmatize_doc(self, doc):
return [t.lemma_ for t in doc if self._keep_token(t)]
# Gensim to create a dictionary and filter out stop and infrequent words (lemmas).
def _get_docs_dict(self, docs):
docs_dict = Dictionary(docs)
# CAREFUL: For small corpus please carefully modify the parameters for filter_extremes, or simply comment it out.
# docs_dict.filter_extremes(no_below=5, no_above=0.2)
docs_dict.compactify()
return docs_dict
# Preprocess docs
def _preprocess(self, doc_list):
# Load spacy model
nlp = spacy.load('en_core_web_md')
# lemmatise docs
# docs = [self._lemmatize_doc(nlp(doc)) for doc in doc_list]
docs = [[nlp(doc).text] for doc in doc_list]
# Get docs dictionary
docs_dict = self._get_docs_dict(docs)
return nlp, docs, docs_dict
# Gensim can again be used to create a bag-of-words representation of each document,
# build the TF-IDF model,
# and compute the TF-IDF vector for each document.
def _get_tfidf(self):
docs_corpus = [self.docs_dict.doc2bow(doc) for doc in self.docs]
model_tfidf = TfidfModel(docs_corpus, id2word=self.docs_dict)
docs_tfidf = model_tfidf[docs_corpus]
docs_vecs = np.vstack([sparse2full(c, len(self.docs_dict)) for c in docs_tfidf])
return docs_vecs
# Get avg w2v for one document
def _document_vector(self, doc, docs_dict, nlp):
# remove out-of-vocabulary words
doc_vector = [nlp(word).vector for word in doc if word in docs_dict.token2id]
return np.mean(doc_vector, axis=0)
# Get a TF-IDF weighted Glove vector summary for document list
# Input: a list of documents, Output: Matrix of vector for all the documents
def tfidf_weighted_wv(self):
# tf-idf
docs_vecs = self._get_tfidf()
# Load glove embedding vector for each TF-IDF term
tfidf_emb_vecs = np.vstack([self.nlp(self.docs_dict[i]).vector for i in range(len(self.docs_dict))])
# To get a TF-IDF weighted Glove vector summary of each document,
# we just need to matrix multiply docs_vecs with tfidf_emb_vecs
        docs_emb = np.dot(docs_vecs, tfidf_emb_vecs)
        return docs_emb
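# Usage sketch (illustrative, not part of the original module): build the model on a
# tiny corpus and fetch the TF-IDF weighted GloVe summary vectors. Requires the spaCy
# model 'en_core_web_md' to be installed; the corpus below is an arbitrary example.
if __name__ == "__main__":
    corpus = ["The quick brown fox jumps over the lazy dog.",
              "Dogs and foxes are both canids."]
    t2v = Text2Vec(corpus)
    doc_vectors = t2v.tfidf_weighted_wv()
    print(doc_vectors.shape)  # (n_documents, embedding_dim), e.g. (2, 300)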
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib import cm
import spectral as spy
from sklearn import metrics
import time
from sklearn import preprocessing
import torch
import MSSGU
from utils import Draw_Classification_Map,distcorr,applyPCA,get_Samples_GT,GT_To_One_Hot
from SegmentMap import SegmentMap
import dcor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu" )
for Neighbors in [0]: #0, 5,10,15,20
# for (FLAG, curr_train_ratio,Unet_Depth) in [ (1,0.05,1),(1,0.05,2),(1,0.05,3),(1,0.05,4),
# (2,0.005,1),(2,0.005,2),(2,0.005,3),(2,0.005,4),
# (3,0.005,1),(3,0.005,2),(3,0.005,3),(3,0.005,4)]:
# for (FLAG, curr_train_ratio,Unet_Depth) in [ (1,5,4),(1,10,4),(1,15,4),(1,20,4),(1,25,4),
# (2,5,4),(2,10,4),(2,15,4),(2,20,4),(2,25,4),
# (3,5,4),(3,10,4),(3,15,4),(3,20,4),(3,25,4)]:
for (FLAG, curr_train_ratio,Unet_Depth) in [(1,0.05,4)]: #(1,0.05,4),,(3,0.005,4)(2,0.005,4),(3,0.005,4)(2,0.005,4),(3,0.005,4)
torch.cuda.empty_cache()
OA_ALL = [];AA_ALL = [];KPP_ALL = [];AVG_ALL = [];Train_Time_ALL=[];Test_Time_ALL=[]
samples_type = 'ratio' if curr_train_ratio < 1 else 'same_num'
# Seed_List=[0,1,2,3,4,5,6,7,8,9]
# Seed_List=[0,1,2,3,4]
Seed_List = [0,]
if FLAG == 1:
# data_mat = sio.loadmat('..\\HyperImage_data\\indian\\Indian_pines_corrected.mat')
data_mat = sio.loadmat('HyperImage_data\\indian\\Indian_pines_corrected.mat')
data = data_mat['indian_pines_corrected']
# gt_mat = sio.loadmat('..\\HyperImage_data\\indian\\Indian_pines_gt.mat')
gt_mat = sio.loadmat('HyperImage_data\\indian\\Indian_pines_gt.mat')
gt = gt_mat['indian_pines_gt']
val_ratio = 0.01
class_count = 16
learning_rate = 5e-4
max_epoch =600
dataset_name = "indian_"
pass
if FLAG == 2:
data_mat = sio.loadmat('..\\HyperImage_data\\paviaU\\PaviaU.mat')
data = data_mat['paviaU']
gt_mat = sio.loadmat('..\\HyperImage_data\\paviaU\\Pavia_University_gt.mat')
gt = gt_mat['pavia_university_gt']
val_ratio = 0.005
class_count = 9
learning_rate = 5e-4
max_epoch = 600
dataset_name = "paviaU_"
pass
if FLAG == 3:
data_mat = sio.loadmat('..\\HyperImage_data\\Salinas\\Salinas_corrected.mat')
data = data_mat['salinas_corrected']
gt_mat = sio.loadmat('..\\HyperImage_data\\Salinas\\Salinas_gt.mat')
gt = gt_mat['salinas_gt']
val_ratio = 0.005
class_count = 16
learning_rate = 5e-4
max_epoch = 600
dataset_name = "salinas_"
pass
if FLAG == 4:
data_mat = sio.loadmat('..\\HyperImage_data\\KSC\\KSC.mat')
data = data_mat['KSC']
gt_mat = sio.loadmat('..\\HyperImage_data\\KSC\\KSC_gt.mat')
gt = gt_mat['KSC_gt']
val_ratio = 0.01
class_count = 13
learning_rate = 5e-4
max_epoch = 600
dataset_name = "KSC_"
pass
if samples_type == 'same_num': val_ratio = 1 ########
train_ratio = curr_train_ratio
cmap = cm.get_cmap('jet', class_count + 1)
plt.set_cmap(cmap)
m, n, d = data.shape
orig_data=data
height, width, bands = data.shape
        data = np.reshape(data, [height * width, bands])
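        # Hedged sketch (the original script is truncated at this point): a common next step
        # is to standardise the flattened spectra and reshape back before sampling, e.g.
        #   data = preprocessing.scale(data)
        #   data = np.reshape(data, [height, width, bands])
        # after which train/val/test masks would be drawn with get_Samples_GT for each seed.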
#!/usr/bin/env python
#
import random
import numpy as np
from copy import copy
from bart_utils import empty, Tree, logsumexp, softmax, check_if_zero, get_children_id
#from itertools import izip, count
from itertools import count
class Particle(Tree):
def __init__(self, train_ids=np.arange(0, dtype='int'), param=empty(), settings=empty(), cache_tmp={}):
Tree.__init__(self, train_ids, param, settings, cache_tmp)
self.ancestry = []
self.nodes_processed_itr = []
self.grow_nodes_itr = []
self.log_sis_ratio_d = {}
if cache_tmp:
self.do_not_grow = False
self.grow_nodes = [0]
def process_node_id(self, data, param, settings, cache, node_id):
if self.do_not_split[node_id]:
log_sis_ratio = 0.0
else:
log_psplit = np.log(self.compute_psplit(node_id, param))
train_ids = self.train_ids[node_id]
left, right = get_children_id(node_id)
if settings.verbose >= 4:
print('train_ids for this node = %s' % train_ids)
(do_not_split_node_id, feat_id_chosen, split_chosen, idx_split_global, log_sis_ratio, logprior_nodeid, \
train_ids_left, train_ids_right, cache_tmp, loglik_left, loglik_right) \
= self.prior_proposal(data, param, settings, cache, node_id, train_ids, log_psplit)
if do_not_split_node_id:
self.do_not_split[node_id] = True
else:
self.update_left_right_statistics(cache_tmp, node_id, logprior_nodeid, train_ids_left,\
train_ids_right, loglik_left, loglik_right, feat_id_chosen, split_chosen, \
idx_split_global, settings, param, data, cache)
self.grow_nodes.append(left)
self.grow_nodes.append(right)
return (log_sis_ratio)
def grow_next(self, data, param, settings, cache):
""" grows just one node at a time (nodewise expansion)
breaks after processing the first non do_not_grow node or when grow_nodes is empty
Note that multiple nodes could be killed in a single grow_next call
"""
# FIXME: refactor without the do_not_grow option; it made sense for SMC paper, but not for PG
do_not_grow = True
log_sis_ratio = 0.0
nodes_processed = []
if not self.grow_nodes:
if settings.verbose >= 2:
print('None of the leaves can be grown any further: Current ' \
'depth = %3d, Skipping grow_next' % self.depth)
else:
while True:
# loop through current leaf nodes, process first "non do_not_grow" node and break;
# if none of the nodes can be processed, do_not_grow = True
remove_position = 0 # just pop the oldest node
node_id = self.grow_nodes.pop(remove_position)
nodes_processed.append(node_id)
do_not_grow = do_not_grow and self.do_not_split[node_id]
if self.do_not_split[node_id]:
if settings.verbose >= 3:
print('Skipping split at node_id %3d' % node_id)
if not self.grow_nodes:
break
else:
log_sis_ratio += self.process_node_id(data, param, settings, cache, node_id)
break # you have processed a non do_not_grow node, take a break!
self.loglik_current = self.compute_loglik()
self.log_sis_ratio = log_sis_ratio
self.do_not_grow = do_not_grow
if nodes_processed:
self.nodes_processed_itr.append(nodes_processed)
def check_nodes_processed_itr(self, settings):
tmp = set([])
for nodes in self.nodes_processed_itr:
for node in nodes:
if node in tmp:
print('node = %s present multiple times in nodes_processed_itr = %s' % \
(node, self.nodes_processed_itr))
raise Exception
else:
tmp.add(node)
def update_particle_weights(particles, log_weights, settings):
for n, p in enumerate(particles):
if settings.verbose >= 2:
print('pid = %5d, log_sis_ratio = %f' % (n, p.log_sis_ratio))
log_weights[n] += p.log_sis_ratio
weights_norm = softmax(log_weights) # normalized weights
ess = 1. / np.sum(weights_norm ** 2) / settings.n_particles
log_pd = logsumexp(log_weights)
return (log_pd, ess, log_weights, weights_norm)
def resample(particles, log_weights, settings, log_pd, ess, weights_norm, tree_pg):
if ess <= settings.ess_threshold:
if tree_pg:
pid_list = resample_pids_basic(settings, settings.n_particles-1, weights_norm)
random.shuffle(pid_list) # shuffle so that particle is assigned randomly
pid_list.insert(0, 0)
else:
pid_list = resample_pids_basic(settings, settings.n_particles, weights_norm)
        log_weights = np.ones(settings.n_particles) * (log_pd - np.log(settings.n_particles))
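# Hedged sketch (illustrative, not the original implementation): `resample_pids_basic`,
# called above, is defined elsewhere in the original module. A simple multinomial
# resampler consistent with how it is called here could look like:
#   def resample_pids_basic(settings, n_particles, prob):
#       return list(np.random.choice(len(prob), size=n_particles, p=prob))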
# future
from __future__ import annotations
# stdlib
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
# relative
from ...lib.numpy.array import capnp_deserialize
from ...lib.numpy.array import capnp_serialize
from ..common.serde.capnp import CapnpModule
from ..common.serde.capnp import chunk_bytes
from ..common.serde.capnp import combine_bytes
from ..common.serde.capnp import get_capnp_schema
from ..common.serde.capnp import serde_magic_header
from ..common.serde.deserialize import _deserialize as deserialize
from ..common.serde.serializable import serializable
from ..common.serde.serialize import _serialize as serialize
from .config import DEFAULT_FLOAT_NUMPY_TYPE
from .config import DEFAULT_INT_NUMPY_TYPE
from .passthrough import PassthroughTensor # type: ignore
from .passthrough import is_acceptable_simple_type # type: ignore
from .smpc import context
@serializable(capnp_bytes=True)
class FixedPrecisionTensor(PassthroughTensor):
# __attr_allowlist__ = ("child", "_base", "_precision", "_scale")
def __init__(
self,
value: Union[int, float, np.ndarray] = None,
base: int = 2,
precision: int = 16,
) -> None:
self._base = base
self._precision = precision
self._scale = base**precision
if value is not None:
            # TODO: Should modify to be compatible with torch.
super().__init__(self.encode(value))
else:
super().__init__(None)
def encode(self, value: Union[int, float, np.ndarray]) -> np.ndarray:
encoded_value = np.array(self._scale * value, DEFAULT_INT_NUMPY_TYPE)
return encoded_value
@property
def precision(self) -> int:
"""Get the precision for the FixedPrecisionTensor.
Returns:
int: precision.
"""
return self._precision
@property
def base(self) -> int:
"""Get the base for the FixedPrecisionTensor.
Returns:
int: base
"""
return self._base
@property
def scale(self) -> int:
"""Get the scale for the FixedPrecisionTensor.
Returns:
int: the scale.
"""
return self._scale
def decode(self) -> Any:
# relative
from .smpc.share_tensor import ShareTensor
value = self.child.child if isinstance(self.child, ShareTensor) else self.child
correction = (value < 0).astype(DEFAULT_INT_NUMPY_TYPE)
dividend = value // self._scale - correction
remainder = value % self._scale
remainder += (
(remainder == 0).astype(DEFAULT_INT_NUMPY_TYPE) * self._scale * correction
)
value = (
dividend.astype(DEFAULT_FLOAT_NUMPY_TYPE)
+ remainder.astype(DEFAULT_FLOAT_NUMPY_TYPE) / self._scale
)
return value
def sanity_check(
self, other: Union[FixedPrecisionTensor, int, float, np.ndarray]
) -> FixedPrecisionTensor:
if isinstance(other, FixedPrecisionTensor):
if self.base != other.base or self.precision != other.precision:
raise ValueError(
f"Base:{self.base,other.base} and Precision: "
+ f"{self.precision, other.precision} should be same for "
+ "computation on FixedPrecisionTensor"
)
elif is_acceptable_simple_type(other):
other = FixedPrecisionTensor(
value=other, base=self.base, precision=self.precision
)
else:
raise ValueError(f"Invalid type for FixedPrecisionTensor: {type(other)}")
return other
def __add__(self, other: Any) -> FixedPrecisionTensor:
res = FixedPrecisionTensor(base=self._base, precision=self._precision)
if isinstance(other, np.ndarray) and other.dtype == np.dtype("bool"):
res.child = self.child + other
else:
other = self.sanity_check(other)
res.child = self.child + other.child
return res
def __sub__(self, other: Any) -> FixedPrecisionTensor:
res = FixedPrecisionTensor(base=self._base, precision=self._precision)
if isinstance(other, np.ndarray) and other.dtype == np.dtype("bool"):
res.child = self.child - other
else:
other = self.sanity_check(other)
res.child = self.child - other.child
return res
def __mul__(self, other: Any) -> FixedPrecisionTensor:
res = FixedPrecisionTensor(base=self._base, precision=self._precision)
if isinstance(other, np.ndarray) and other.dtype == np.dtype("bool"):
res.child = self.child * other
else:
other = self.sanity_check(other)
context.FPT_CONTEXT["seed_id_locations"] = context.SMPC_CONTEXT.get(
"seed_id_locations", None
)
res.child = self.child * other.child
res = res / self.scale
return res
def __matmul__(self, other: Any) -> FixedPrecisionTensor:
res = FixedPrecisionTensor(base=self._base, precision=self._precision)
        if isinstance(other, np.ndarray) and other.dtype == np.dtype("bool"):
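# Usage sketch (illustrative, not part of the original class): encode/decode roundtrip
# with the default base=2, precision=16 (scale = 65536).
#   fpt = FixedPrecisionTensor(value=np.array([1.5, 0.25]))
#   fpt.child      # integer representation: array([98304, 16384])
#   fpt.decode()   # -> array([1.5 , 0.25])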
"""Tests for the atmos_flux_inversion package.
Includes tests using random data, analytic solutions, and checks that
different methods agree for simple problems.
"""
from __future__ import print_function, division
import fractions
import itertools
import operator
import os.path
import atexit
import pickle
import math
import sys
try:
from functools import reduce
except ImportError:
# reduce used to be a builtin
pass
import numpy as np
import numpy.linalg as np_la
import numpy.linalg as la
import numpy.testing as np_tst
import scipy.linalg
import scipy.sparse
import scipy.optimize
# Import from scipy.linalg if not using dask
from scipy.linalg import cholesky
from scipy.sparse.linalg.interface import LinearOperator, MatrixLinearOperator
import unittest2
import pyfftw
import pandas as pd
import xarray
try:
import sparse
HAVE_SPARSE = True
except ImportError:
HAVE_SPARSE = False
import atmos_flux_inversion.optimal_interpolation
import atmos_flux_inversion.correlations
import atmos_flux_inversion.covariances
import atmos_flux_inversion.variational
import atmos_flux_inversion.remapper
import atmos_flux_inversion.wrapper
import atmos_flux_inversion.linalg
import atmos_flux_inversion.noise
import atmos_flux_inversion.psas
import atmos_flux_inversion.util
from atmos_flux_inversion.linalg import tolinearoperator
if os.path.exists(".pyfftw.pickle"):
with open(".pyfftw.pickle", "rb") as wis_in:
WISDOM = pickle.load(wis_in)
if isinstance(WISDOM[0], str):
WISDOM = [wis.encode("ascii")
for wis in WISDOM]
pyfftw.import_wisdom(WISDOM)
del WISDOM, wis_in
def save_wisdom():
"""Save accumulated pyfftw wisdom.
Saves in hidden file in current directory.
Should help speed up subsequent test runs.
"""
with open(".pyfftw.pickle", "wb") as wis_out:
pickle.dump(pyfftw.export_wisdom(), wis_out, 2)
atexit.register(save_wisdom)
del save_wisdom
# If adding other inexact methods to the list tested, be sure to add
# those to the `if "var" in name or "psas" in name` and
# `if "psas" in name` tests as applicable.
ALL_METHODS = (
atmos_flux_inversion.optimal_interpolation.simple,
atmos_flux_inversion.optimal_interpolation.fold_common,
atmos_flux_inversion.optimal_interpolation.save_sum,
atmos_flux_inversion.optimal_interpolation.scipy_chol,
atmos_flux_inversion.variational.simple,
atmos_flux_inversion.variational.incremental,
atmos_flux_inversion.variational.incr_chol,
atmos_flux_inversion.psas.simple,
atmos_flux_inversion.psas.fold_common,
)
ITERATIVE_METHOD_START = 4
"""Where the iterative methods start in the above list.
Used to test failure modes for these solvers.
"""
PRECISE_DTYPE = np.float128
"""The dtype used to represent analytic results.
These are initialized as :class:`fractions.Fraction` then converted to
this dtype for the comparison.
"""
ITERATIVE_STATE_TOLERANCE = 1e-3
ITERATIVE_COVARIANCE_TOLERANCE = 1e-1
EXACT_TOLERANCE = 1e-7
DTYPE = np.float64
"""Default dtype for certain tests."""
def getname(method):
"""Descriptive name for the function.
A name combining the function name and module.
Parameters
----------
method: callable
Returns
-------
name: str
"""
module = method.__module__
group = module.split(".")[-1]
variant = method.__name__
return "{group:s} ({variant:s})".format(group=group,
variant=variant)
def expectFailureIf(condition):
"""Mark a test as XFAIL based on condition.
Wrapper to make :func:`unittest2.expectedFailure` conditional.
Parameters
----------
condition: bool
Returns
-------
decorator: func
"""
if condition:
return unittest2.expectedFailure
return lambda fun: fun
class TestInversionSimple(unittest2.TestCase):
"""Test inversions using simple cases."""
def test_scalar_equal_variance(self):
"""Test a direct measurement of a scalar state."""
bg = np.atleast_1d(2.)
bg_cov = np.atleast_2d(1.)
obs = np.atleast_1d(3.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, 2.5)
np_tst.assert_allclose(post_cov, .5)
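    # Worked example (added sketch) for the scalar case above: optimal interpolation gives
    #   K = bg_cov * obs_op / (obs_op * bg_cov * obs_op + obs_cov) = 1 / (1 + 1) = 0.5
    #   post = bg + K * (obs - obs_op * bg) = 2 + 0.5 * 1 = 2.5
    #   post_cov = (1 - K * obs_op) * bg_cov = 0.5
    # which is exactly what the assertions check.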
def test_scalar_unequal_variance(self):
"""Test assimilation of a direct measurement fo a scalar state.
Variances not equal.
"""
bg = np.atleast_1d(15.)
bg_cov = np.atleast_2d(2.)
obs = np.atleast_1d(14.)
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS:
with self.subTest(method=getname(method)):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post, PRECISE_DTYPE(14 + fractions.Fraction(1, 3)))
np_tst.assert_allclose(
post_cov, PRECISE_DTYPE(fractions.Fraction(2, 3)))
def test_multiple_priors(self):
"""Test doing multiple assimilations at once.
Simple test.
"""
bg = np.array([[2., 3.]])
bg_cov = np.atleast_2d(1.)
obs = np.array([[3., 4.]])
obs_cov = np.atleast_2d(1.)
obs_op = np.atleast_2d(1.)
for method in ALL_METHODS[:ITERATIVE_METHOD_START]:
name = getname(method)
with self.subTest(method=name):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(post, [[2.5, 3.5]])
np_tst.assert_allclose(post_cov, .5)
def test_homework_one(self):
"""Verify that this can reproduce the answers to HW1.
Make sure the answers here are within roundoff of the analytic
solutions.
"""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
# Setup for expected degradation of solutions
name = getname(method)
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
# Also tested above in scalar_unequal_variance
with self.subTest(problem=3):
state_college_index = 1
post, post_cov = method(
bg[state_college_index],
bg_cov[state_college_index, state_college_index],
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, state_college_index])
np_tst.assert_allclose(
post, np.asanyarray(14 + fractions.Fraction(1, 3),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
np_tst.assert_allclose(
post_cov, np.asanyarray(fractions.Fraction(2, 3),
dtype=PRECISE_DTYPE),
rtol=cov_rtol)
with self.subTest(problem=4):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(2, 3),
14 + fractions.Fraction(1, 3),
21 + fractions.Fraction(2, 3)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=5):
pittsburgh_index = 0
post, post_cov = method(
bg, bg_cov,
obs[pittsburgh_index],
obs_cov[pittsburgh_index, pittsburgh_index],
obs_op[pittsburgh_index, :])
np_tst.assert_allclose(
post,
np.asanyarray((18 + fractions.Fraction(2, 3),
15 + fractions.Fraction(1, 3),
22 + fractions.Fraction(1, 6)),
PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=7):
state_college_index = 1
post, post_cov = method(
bg, bg_cov,
obs[state_college_index],
4 * obs_cov[state_college_index, state_college_index],
obs_op[state_college_index, :])
np_tst.assert_allclose(
post, np.asanyarray((17 + fractions.Fraction(5, 6),
14 + fractions.Fraction(2, 3),
21 + fractions.Fraction(5, 6)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
with self.subTest(problem=8):
post, post_cov = method(
bg, bg_cov, obs, obs_cov, obs_op)
# background correlations make this problem not
# strictly linear, at least without doing
# sequential inversions. Have not verified by hand
np_tst.assert_allclose(
post, np.asanyarray(
(18 + fractions.Fraction(1, 2),
14 + fractions.Fraction(1, 2),
21 + fractions.Fraction(3, 4)),
dtype=PRECISE_DTYPE),
rtol=state_rtol)
def test_sequential_assimilations(self):
"""Make sure this follows Bayes' rule."""
bg = np.array((18., 15., 22.))
bg_var = np.array((2., 2., 2.))
bg_corr = np.array(((1, .5, .25),
(.5, 1, .5),
(.25, .5, 1)))
obs = np.array((19., 14.))
obs_var = np.array((1., 1.))
obs_op = np.array(((1., 0., 0.),
(0., 1., 0.)))
bg_std = np.sqrt(bg_var)
bg_cov = np.diag(bg_std).dot(bg_corr.dot(np.diag(bg_std)))
# obs_std = np.sqrt(obs_var)
# Assume no correlations between observations.
obs_cov = np.diag(obs_var)
for method in ALL_METHODS:
name = getname(method)
if "var" in name.lower() or "psas" in name.lower():
state_rtol = ITERATIVE_STATE_TOLERANCE
cov_rtol = ITERATIVE_COVARIANCE_TOLERANCE
else:
# The default for assert_allclose
cov_rtol = state_rtol = EXACT_TOLERANCE
with self.subTest(method=name):
inter1, inter_cov1 = method(
bg, bg_cov, obs[0], obs_cov[0, 0],
obs_op[0, :])
post1, post_cov1 = method(
inter1, inter_cov1, obs[1], obs_cov[1, 1],
obs_op[1, :])
post2, post_cov2 = method(
bg, bg_cov, obs, obs_cov, obs_op)
np_tst.assert_allclose(
post1, post2, rtol=state_rtol)
if "psas" in name.lower():
# The second covariance isn't positive definite (one
# positive entry) and no entry shares the order of
# magnitude between the two.
raise unittest2.SkipTest("Known Failure: PSAS Covariances")
np_tst.assert_allclose(
post_cov1, post_cov2, rtol=cov_rtol)
def test_iterative_failures(self):
"""Test failure modes of iterative solvers."""
bg_stds = np.logspace(-8, 1, 10)
bg_corr = scipy.linalg.toeplitz(
np.arange(1, .9, -.01))
bg_cov = np.diag(bg_stds).dot(bg_corr).dot(np.diag(bg_stds))
bg_vals = np.arange(10)
obs_op = np.eye(3, 10)
obs_vals = 10 - np.arange(3)
obs_cov = np.diag((10, 1e-3, 1e-6)) / 8
for method in ALL_METHODS[ITERATIVE_METHOD_START:]:
name = getname(method)
with self.subTest(method=name):
with self.assertRaises(
atmos_flux_inversion.ConvergenceError) as cxt_mgr:
method(bg_vals, bg_cov, obs_vals, obs_cov, obs_op)
conv_err = cxt_mgr.exception
self.assertTrue(hasattr(conv_err, "guess"))
self.assertTrue(hasattr(conv_err, "result"))
self.assertIsInstance(conv_err.result,
scipy.optimize.OptimizeResult)
self.assertTrue(hasattr(conv_err, "hess_inv"))
class TestGaussianNoise(unittest2.TestCase):
"""Test the properties of the gaussian noise."""
def test_ident_cov(self):
"""Test generation with identity as covariance."""
sample_shape = 3
cov = np.eye(sample_shape)
noise = atmos_flux_inversion.noise.gaussian_noise(cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros((sample_shape,)),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), cov,
rtol=1e-2, atol=1e-2)
def test_shape(self):
"""Make sure the returned shapes are correct."""
sample_shape = (3,)
sample_cov = np.eye(sample_shape[0])
for shape in ((), (6,), (2, 3)):
with self.subTest(shape=shape):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, shape)
self.assertEqual(res.shape, shape + sample_shape)
with self.subTest(shape=5):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, 5)
self.assertEqual(res.shape, (5,) + sample_shape)
with self.subTest(shape=None):
res = atmos_flux_inversion.noise.gaussian_noise(
sample_cov, None)
self.assertEqual(res.shape, sample_shape)
def test_operator(self):
"""Test that the code works with operator covariances."""
diagonal = (1, .5, .3, .2, .1)
sample_cov = atmos_flux_inversion.covariances.DiagonalOperator(
diagonal)
sample_shape = (len(diagonal),)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), np.diag(diagonal),
rtol=1e-2, atol=1e-2)
def test_kron_op(self):
"""Test that large kronecker operators don't break the handling."""
op1 = scipy.linalg.toeplitz(.6 ** np.arange(15))
diag = (1, .9, .8, .7, .6, .5, .4, .3, .2, .1)
op2 = atmos_flux_inversion.covariances.DiagonalOperator(diag)
combined = atmos_flux_inversion.util.kronecker_product(op1, op2)
noise = atmos_flux_inversion.noise.gaussian_noise(combined, int(1e5))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(combined.shape[0]),
rtol=1.1e-2, atol=1.1e-2)
np_tst.assert_allclose(np.cov(noise.T),
scipy.linalg.kron(op1, np.diag(diag)),
rtol=3e-2, atol=3e-2)
def test_off_diagonal(self):
"""Test that the code works with off-diagonal elements."""
sample_cov = scipy.linalg.toeplitz((1, .5, .25, .125))
sample_shape = (4,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_slow_decay(self):
"""Test that the code handles slowly-decaying covariances."""
sample_cov = scipy.linalg.toeplitz(.8 ** np.arange(10))
sample_shape = (10,)
noise = atmos_flux_inversion.noise.gaussian_noise(sample_cov, int(1e6))
np_tst.assert_allclose(noise.mean(axis=0),
np.zeros(sample_shape),
rtol=1e-2, atol=1e-2)
np_tst.assert_allclose(np.cov(noise.T), sample_cov,
rtol=1e-2, atol=1e-2)
def test_fails(self):
"""Test that construction fails on invalid input."""
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.ones(10))
self.assertRaises(ValueError,
atmos_flux_inversion.noise.gaussian_noise,
np.eye(3, 2))
class TestCorrelations(unittest2.TestCase):
"""Test the generation of correlation matrices."""
def test_far_correl(self):
"""Test the correlation between points far apart.
Should be zero.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e-8)
corr = corr_fun(1e8)
self.assertAlmostEqual(corr, 0)
def test_near_correl(self):
"""Test 2D correlation between near points.
Should be one.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=corr_class.__name__):
corr_fun = corr_class(1e8)
corr = corr_fun(1e-8)
self.assertAlmostEqual(corr, 1)
def test_2d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (int(15), int(20))
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
                # test positive definite
try:
chol_upper = cholesky(corr_mat)
except la.LinAlgError:
self.fail("corr_mat not positive definite")
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_2d_make_matrix(self):
"""Test make_matrix for 2D correlations.
Checks against original value.
This test is really slow.
"""
# 30x25 Gaussian 10 not close
test_nx = 30
test_ny = 20
test_points = test_ny * test_nx
# TODO: speed up
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 15):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, (test_ny, test_nx))
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index,
(test_ny, test_nx, test_ny, test_nx)
).reshape((test_points, test_points)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=1e-5, atol=1e-6)
# check if it actually is positive definite
cholesky(corr_mat)
def test_1d_np_fromfunction(self):
"""Test that the structure works with np.fromfunction.
This is how the integration tests will get background
covariances, so this needs to work.
"""
test_size = (200,)
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction
.__subclasses__()):
with self.subTest(corr_class=getname(corr_class)):
# This fails with a correlation length of 5
corr_fun = corr_class(2.)
corr = np.fromfunction(corr_fun.correlation_from_index,
shape=test_size * 2, dtype=float)
corr_mat = corr.reshape((np.prod(test_size),) * 2)
                # test positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_1d_make_matrix(self):
"""Test make_matrix for 1D correlations.
Checks against original value.
"""
test_nt = 200
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 5, 10, 30):
with self.subTest(corr_class=getname(corr_class),
dist=dist):
if (
corr_class ==
atmos_flux_inversion.correlations.
GaussianCorrelation
):
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist)
)
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun,
test_nt
)
# Make sure diagonal elements are ones
np_tst.assert_allclose(np.diag(corr_mat), 1, rtol=1e-6)
# check if it matches the original
np_tst.assert_allclose(
corr_mat,
np.fromfunction(
corr_fun.correlation_from_index, (test_nt, test_nt)
).reshape((test_nt, test_nt)),
# rtol=1e-13: Gaussian 10 and 15 fail
# atol=1e-15: Gaussian 1 and 5 fail
rtol=2e-7, atol=5e-7
)
# check if it actually is positive definite
chol_upper = cholesky(corr_mat)
# test symmetry
np_tst.assert_allclose(chol_upper.T.dot(chol_upper),
corr_mat,
rtol=1e-4, atol=1e-4)
def test_fft_correlation_structure(self):
"""Ensure the FFT-based operators satisfy conditions of correlation matrices.
Checks for symmetry and ones on the diagonal.
"""
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for test_shape in ((300,), (20, 30)):
test_size = int(np.prod(test_shape, dtype=int))
for dist in (1, 3, 10, 30):
for is_cyclic in (True, False):
corr_fun = corr_class(dist)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, is_cyclic))
# This is the fastest way to get column-major
# order from da.eye.
corr_mat = corr_op.dot(np.eye(test_size).T)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="symmetry"):
np_tst.assert_allclose(corr_mat, corr_mat.T,
rtol=1e-14, atol=1e-15)
with self.subTest(
corr_class=getname(corr_class), dist=dist,
test_shape=test_shape, is_cyclic=is_cyclic,
test="self-correlation"):
np_tst.assert_allclose(np.diag(corr_mat), 1)
def test_1d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec)[noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1.5e-3)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
elif ((corr_class is atmos_flux_inversion.correlations.
BalgovindCorrelation and
dist == 10)):
# This one distance is problematic
# Roughly 3% of the points disagree
# for the last half of the tests
# I have no idea why
raise unittest2.SkipTest(
"Balgovind(10) correlations weird")
np_tst.assert_allclose(
corr_op.solve(
test_vec)[noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec)[noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=2e-3
)
def test_1d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 1D arrays.
Check against `make_matrix` and ignore values near the edges
of the domain where the two methods are different.
"""
test_nt = 512
test_lst = (np.zeros(test_nt), np.ones(test_nt), np.arange(test_nt),
np.eye(100, test_nt)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3, 10):
# Magic numbers
# May need to increase for larger test_nt
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_nt)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_nt, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="no"):
np_tst.assert_allclose(
corr_op.dot(test_vec),
corr_mat.dot(test_vec),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
inverse="yes"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_2d_fft_correlation_cyclic(self):
"""Test HomogeneousIsotropicCorrelation for cyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
noncorr_dist = 20 + 8 * dist
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
corr_mat.dot(test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
if ((corr_class is atmos_flux_inversion.correlations.
GaussianCorrelation and
dist >= 3)):
# Gaussian(3) has FFT less
# well-conditioned than make_matrix
raise unittest2.SkipTest(
"Gaussian({0:d}) correlations ill-conditioned".
format(dist))
np_tst.assert_allclose(
corr_op.solve(
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
la.solve(
corr_mat,
test_vec).reshape(test_shape)
[noncorr_dist:-noncorr_dist,
noncorr_dist:-noncorr_dist],
rtol=1e-3, atol=1e-5)
def test_2d_fft_correlation_acyclic(self):
"""Test HomogeneousIsotropicCorrelation for acyclic 2D arrays.
Check against `make_matrix` and ignore values near the edges
where the two methods differ.
"""
test_shape = (20, 30)
test_size = np.prod(test_shape)
test_lst = (np.zeros(test_size),
np.ones(test_size),
np.arange(test_size),
np.eye(10 * test_shape[0], test_size)[-1])
for corr_class in (
atmos_flux_inversion.correlations.DistanceCorrelationFunction.
__subclasses__()):
for dist in (1, 3):
# Magic numbers
# May need to increase for larger domains
corr_fun = corr_class(dist)
corr_mat = atmos_flux_inversion.correlations.make_matrix(
corr_fun, test_shape)
corr_op = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_shape, False))
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="forward"):
np_tst.assert_allclose(
corr_op.dot(test_vec).reshape(test_shape),
corr_mat.dot(test_vec).reshape(test_shape),
rtol=1e-3, atol=1e-5)
for i, test_vec in enumerate(test_lst):
with self.subTest(corr_class=getname(corr_class),
dist=dist, test_num=i,
direction="backward"):
self.assertRaises(
NotImplementedError, corr_op.solve, test_vec)
def test_homogeneous_from_array_cyclic(self):
"""Make sure cyclic from_array can be roundtripped.
Also tests that odd state sizes work.
"""
test_size = 25
corr_class = atmos_flux_inversion.correlations.ExponentialCorrelation
for dist in (1, 3, 5):
with self.subTest(dist=dist):
corr_fun = corr_class(dist)
corr_op1 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_function(corr_fun, test_size, True))
first_column = corr_op1.dot(np.eye(test_size, 1)[:, 0])
corr_op2 = (
atmos_flux_inversion.correlations.
HomogeneousIsotropicCorrelation.
from_array(first_column))
np_tst.assert_allclose(
                    corr_op1.dot(np.eye(test_size)),
                    corr_op2.dot(np.eye(test_size)))
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cp
'''
In order to add / modify a problem, you should:
(a) Add a new target function and modify the target method
(b) Add its analytical solution and modify the solve_analytical method
(c) Add the new problem to AdmmCentralized class (if you want to use ADMM centralized), i.e., modify updates of x and z
(d) Add the new problem to AdmmDistributedAgent class (if you want to use ADMM distributed), i.e., modify updates of x and z
'''
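# Usage sketch (illustrative; the solver classes are defined below). The synthetic data,
# problem name and hyper-parameters here are arbitrary examples, not part of the module:
#   np.random.seed(0)
#   A = np.random.randn(100, 5)
#   b = A @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.01 * np.random.randn(100)
#   admm_c = AdmmCentralized(A, b, "lasso", lam=0.1, rho=10)
#   admm_c.train(200); admm_c.plot()
#   admm_d = AdmmDistributed([A[:50], A[50:]], [b[:50], b[50:]], "lasso", lam=0.1, rho=10)
#   admm_d.train(200); admm_d.plot()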
class AdmmDistributed(object): # Class that prepares data for distributed training
'''
Note that this class is a convenient way to test a distributed ADMM implementation. In real deployments, no agent has
access to all data (as this class does) and hence, it is not possible to compute the global loss unless we split the
regularizer term among all distributed agents. In a real deployment, also, the analytical solution is not available.
    This class is provided just for illustration and testing purposes.
'''
def __init__(self, data_in, data_out, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.1):
if type(data_in) is not list or type(data_out) is not list:
raise RuntimeError('Data must be provided as a list of numpy arrays per agent')
        if len(data_in) != len(data_out):
raise RuntimeError('Input and output data lists must have the same number of elements')
self.na = len(data_in)
self.problem = problem # Problem to be solved
# To store training values
self.f = None # To store function values (global)
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
# Store global data
self.global_data_in = np.vstack(data_in)
self.global_data_out = np.hstack(data_out)
self.global_data_out = self.global_data_out.reshape([self.global_data_out.size, 1])
# Build ADMM agents
self.agents = [AdmmDistributedAgent(data_in[i], data_out[i], self.global_data_in.shape[0], self.na,
self.problem, lam=self.lam, rho=self.rho, grad_steps=self.grad_steps,
grad_mu=self.grad_mu) for i in range(self.na)]
# Analytical solution (for comparison purposes)
self.fopt, self.xopt = self.solve_analytical()
def function(self, x): # Global target function (this function is only for illustration purposes)
return target(self.global_data_in, self.global_data_out, self.lam, x, self.problem, z=None, na=1)
def solve_analytical(self):
return solve_analytical(self.global_data_in, self.global_data_out, self.lam, self.problem)
def train(self, niter): # Distributed ADMM training!
for agent in self.agents:
agent.initialize() # Initialize local values
self.f = [] # Initialize global f value
for iter in range(niter):
# Update x (locally)
for agent in self.agents:
agent.x_update(agent.x[-1], agent.y[-1], agent.z[-1])
# Update z (globally!)
sum_x = np.zeros_like(self.agents[0].x[-1])
sum_y = np.zeros_like(self.agents[0].y[-1])
for agent in self.agents:
sum_x += agent.x[-1]
sum_y += agent.y[-1]
for agent in self.agents:
agent.z_update(sum_x / self.na, sum_y / self.na, agent.z[-1])
# Update y (locally)
for agent in self.agents:
agent.y_update(agent.x[-1], agent.y[-1], agent.z[-1])
# Update global f: make use of z (global value, shared by all agents)
self.f.append(self.function(self.agents[0].z[-1]))
def plot(self):
# Plot the losses using the global variable z and all the data
plt.plot(10 * np.log10(np.square(np.array(self.f) - self.fopt) + np.finfo(float).eps), 'b', label='global')
# Plot also the losses using the local terms, x and z (the actual values obtained: the gap is due to x != z)
sum_f_local = np.sum(np.array([np.array(agent.f) for agent in self.agents]), axis=0)
plt.plot(10 * np.log10(np.square(sum_f_local - self.fopt) + np.finfo(float).eps), 'r', label='local')
plt.title('ADMM distributed global loss')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.show()
'''
for i, agent in enumerate(self.agents):
plt.plot(agent.f, label=str(i))
plt.plot(self.f, label='Global value')
plt.title('ADMM distributed function values: local and global')
plt.xlabel('Iteration')
plt.ylabel('Value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.x)), label=str(i))
plt.title('ADMM distributed x')
plt.xlabel('Iteration')
plt.ylabel('x value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.y)), label=str(i))
plt.title('ADMM distributed y')
plt.xlabel('Iteration')
plt.ylabel('y value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(np.squeeze(np.array(agent.z)), label=str(i))
plt.title('ADMM distributed z')
plt.xlabel('Iteration')
plt.ylabel('z value')
plt.legend(loc='best')
plt.show()
for i, agent in enumerate(self.agents):
plt.plot(10 * np.log10(
np.sum(np.square(np.squeeze(np.array(agent.z)) - np.squeeze(np.array(agent.x))), axis=1) + np.finfo(
float).eps), label=str(i))
plt.title('ADMM distributed x-z convergence')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.legend(loc='best')
plt.show()
'''
class AdmmDistributedAgent(object):
def __init__(self, local_data_in, local_data_out, d_tot, na, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.001):
self.ndata = local_data_in.shape[0] # Number of data points (local dataset)
self.ndata_tot = d_tot # Number of data points (global dataset)
self.data_dim = local_data_in.shape[1] # Number of features per data point
self.data_in = local_data_in # Feature matrix
self.data_out = local_data_out.reshape([self.ndata, 1]) # Labels / regression targets
self.na = na # Number of agents cooperating
self.problem = problem # Problem to be solved
# To store training values
self.x = None # To store x values (local)
self.y = None # To store y values (local)
self.z = None # To store z values (global)
self.f = None # To store function values (local)
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
def function(self, x, z): # Local target function
return target(self.data_in, self.data_out, self.lam, x, self.problem, z=z, na=self.na, ntot=self.ndata_tot)
def x_update(self, x, y, z):
if self.problem is "lasso" or self.problem is "ridge":
term1 = np.linalg.inv(2 / self.ndata_tot * self.data_in.T @ self.data_in + self.rho * np.eye(self.data_dim))
term2 = 2 / self.ndata_tot * self.data_in.T @ self.data_out + self.rho * (z - y)
xnew = term1 @ term2
elif self.problem is "svm": # In this case, we use a subgradient approach for the hinge function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
term1 = -1 / self.ndata_tot * np.sum(d[np.squeeze(d @ x < 1), :], axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
elif self.problem is "logistic": ## We use a gradient method for the logistic function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
denominator = np.repeat(1 + np.exp(d @ x), self.data_dim, axis=1)
term1 = -1 / self.ndata_tot * np.sum(d / denominator, axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
else:
raise RuntimeError('Problem not recognized')
self.x.append(xnew)
def y_update(self, x, y, z):
ynew = y + x - z
self.y.append(ynew)
# Update also the function value!
self.f.append(self.function(x, z))
def z_update(self, x, y, z): # In this case, x and y are the average of local x and y values!!
if self.problem is "lasso":
q = x + y
v = self.lam / (self.na * self.rho)
znew = np.maximum(np.zeros_like(q), q - v) - np.maximum(np.zeros_like(q), - q - v)
elif self.problem is "ridge" or self.problem is "svm" or self.problem is "logistic":
znew = (x+y) * self.rho * self.na / (self.lam + self.rho * self.na)
else:
raise RuntimeError('Problem not recognized')
self.z.append(znew)
def initialize(self):
# Initialize values
self.x = [] # To store x values
self.y = [] # To store y values
self.z = [] # To store z values
self.f = [] # To store target function values
self.x.append(np.zeros((self.data_dim, 1)))
self.y.append(np.zeros((self.data_dim, 1)))
self.z.append(np.zeros((self.data_dim, 1)))
self.f.append(self.function(self.x[-1], self.z[-1]))
class AdmmCentralized(object):
def __init__(self, data_in, data_out, problem, lam=0.1, rho=10, grad_steps=10, grad_mu=0.001):
self.ndata = data_in.shape[0] # Number of data points
self.data_dim = data_in.shape[1] # Number of features per data point
self.data_in = data_in # Feature matrix
self.data_out = data_out.reshape([self.ndata, 1]) # Labels / regression targets
self.problem = problem # Problem to be solved
# To store training values
self.x = None # To store x values
self.y = None # To store y values
self.z = None # To store z values
self.f = None # To store function values
# ADMM parameters
self.lam = lam # Regularizer
self.rho = rho # Quadratic weight
self.grad_steps = grad_steps # Number of steps per iteration in gradient / subgradient method (if needed)
self.grad_mu = grad_mu # Step size in gradient / subgradient method (if needed)
# Analytical solution (for comparison purposes)
self.fopt, self.xopt = self.solve_analytical()
def function(self, x): # Target function
return target(self.data_in, self.data_out, self.lam, x, self.problem, z=None, na=1)
def solve_analytical(self):
return solve_analytical(self.data_in, self.data_out, self.lam, self.problem)
def x_update(self, x, y, z):
if self.problem is "lasso" or self.problem is "ridge":
term1 = np.linalg.inv(2 / self.ndata * self.data_in.T @ self.data_in + self.rho * np.eye(self.data_dim))
term2 = 2 / self.ndata * self.data_in.T @ self.data_out + self.rho*(z-y)
xnew = term1 @ term2
elif self.problem is "svm": # In this case, we use a subgradient approach for the hinge function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
term1 = -1 / self.ndata * np.sum(d[np.squeeze(d @ x < 1), :], axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
elif self.problem is "logistic": ## We use a gradient method for the logistic function
for it in range(self.grad_steps):
d = np.diag(np.squeeze(self.data_out)) @ self.data_in
denominator = np.repeat(1 + np.exp(d @ x), self.data_dim, axis=1)
term1 = -1 / self.ndata * np.sum(d / denominator, axis=0).reshape([self.data_dim, 1])
x = x - self.grad_mu * (term1 + self.rho * (x - z + y))
xnew = x
else:
raise RuntimeError('Problem not recognized')
self.x.append(xnew)
def y_update(self, x, y, z): # Always the same, we update the function value here!
ynew = y + x - z
self.y.append(ynew)
# Update also the function value!
self.f.append(self.function(x))
def z_update(self, x, y, z):
if self.problem is "lasso":
q = x + y
v = self.lam / self.rho
znew = np.maximum(np.zeros_like(q), q - v) - np.maximum(np.zeros_like(q), - q - v)
elif self.problem is "ridge" or self.problem is "svm" or self.problem is "logistic":
znew = (x + y) * self.rho / (self.lam + self.rho)
else:
raise RuntimeError('Problem not recognized')
self.z.append(znew)
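    # Added note: the lasso branch above is the proximal operator of (lam/rho)*||.||_1
    # evaluated at q = x + y, i.e. elementwise soft-thresholding
    #   z_new = sign(q) * max(|q| - v, 0),  with v = lam / rho,
    # which is exactly what the difference of the two np.maximum terms computes.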
def initialize(self):
self.x = [] # To store x values
self.y = [] # To store y values
self.z = [] # To store z values
self.f = [] # To store target function values
self.x.append(np.zeros((self.data_dim, 1)))
self.y.append(np.zeros((self.data_dim, 1)))
self.z.append(np.zeros((self.data_dim, 1)))
self.f.append(self.function(self.x[-1]))
def train(self, niter): # Train centralized ADMM
# Initialize values
self.initialize()
# Iterate ADMM
for iter in range(niter):
self.x_update(self.x[-1], self.y[-1], self.z[-1]) # Update x
self.z_update(self.x[-1], self.y[-1], self.z[-1]) # Update z
self.y_update(self.x[-1], self.y[-1], self.z[-1]) # Update y (and store the function value!)
def plot(self):
'''
plt.plot(np.squeeze(np.array(self.x)), 'b', label='x')
plt.plot(np.squeeze(np.array(self.y)), 'r', label='y')
plt.plot(np.squeeze(np.array(self.z)), 'g', label='z')
plt.title('ADMM centralized values')
plt.xlabel('Iteration')
plt.ylabel('Value')
plt.legend(loc='best')
plt.show()
plt.plot(10 * np.log10(np.square(np.array(self.f))))
plt.title('ADMM centralized function')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.show()
'''
plt.plot(10 * np.log10(np.square(np.array(self.f) - self.fopt) + np.finfo(float).eps))
plt.title('ADMM centralized loss')
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.show()
# Target functions (in distributed, x is local and z is global)
def target(data_in, data_out, lam, x, problem, z=None, na=1, ntot=None):
if problem is "lasso":
return target_lasso(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "svm":
return target_svm(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "ridge":
return target_ridge(data_in, data_out, lam, x, z, na=na, ntot=ntot)
elif problem is "logistic":
return target_logistic(data_in, data_out, lam, x, z, na=na, ntot=ntot)
else:
raise RuntimeError('Problem not recognized')
def target_lasso(data_in, data_out, lam, x, z, na=1, ntot=None):
if ntot is None:
ntot = data_in.shape[0]
if z is None:
return np.sum(np.square(data_in @ x - data_out)) / ntot + lam * np.sum(np.abs(x)) / na
else:
        return np.sum(np.square(data_in @ x - data_out)) / ntot + lam * np.sum(np.abs(z)) / na
"""
.. module:: optimization_problems
:synopsis: Optimization test problems for multi-modal and
box-constrained global optimization
.. moduleauthor:: <NAME> <<EMAIL>>,
<NAME> <<EMAIL>>
:Module: optimization_problems
:Author: <NAME> <<EMAIL>>,
<NAME> <<EMAIL>>
"""
import numpy as np
import abc
from abc import abstractmethod
class OptimizationProblem(object):
"""Base class for optimization problems."""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.dim = None
self.lb = None
self.ub = None
self.int_var = None
self.cont_var = None
def __check_input__(self, x):
if len(x) != self.dim:
raise ValueError('Dimension mismatch')
@abstractmethod
def eval(self, record): # pragma: no cover
pass
# ========================= 2-dimensional =======================
class GoldsteinPrice(OptimizationProblem):
def __init__(self):
self.info = "2-dimensional Goldstein-Price function"
self.min = 3.0
self.minimum = np.array([0, -1])
self.dim = 2
self.lb = -2.0 * np.ones(2)
self.ub = 2.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
def eval(self, x):
"""Evaluate the GoldStein Price function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
x1 = x[0]
x2 = x[1]
fact1a = (x1 + x2 + 1) ** 2
fact1b = 19 - 14 * x1 + 3 * x1 ** 2 - \
14 * x2 + 6 * x1 * x2 + 3 * x2 ** 2
fact1 = 1 + fact1a * fact1b
fact2a = (2 * x1 - 3 * x2) ** 2
fact2b = 18 - 32 * x1 + 12 * x1 ** 2 + 48 * x2 - \
36 * x1 * x2 + 27 * x2 ** 2
fact2 = 30 + fact2a * fact2b
return fact1 * fact2
class SixHumpCamel(OptimizationProblem):
"""Six-hump camel function
Details: https://www.sfu.ca/~ssurjano/camel6.html
Global optimum: :math:`f(0.0898,-0.7126)=-1.0316`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = -1.0316
self.minimum = np.array([0.0898, -0.7126])
self.dim = 2
self.lb = -3.0 * np.ones(2)
self.ub = 3.0 * np.ones(2)
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
self.info = "2-dimensional Six-hump function \nGlobal optimum: " +\
"f(0.0898, -0.7126) = -1.0316"
def eval(self, x):
"""Evaluate the Six Hump Camel function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return (4.0 - 2.1*x[0]**2 + (x[0]**4)/3.0)*x[0]**2 + \
x[0]*x[1] + (-4 + 4*x[1]**2) * x[1]**2
class Branin(OptimizationProblem):
"""Branin function
Details: http://www.sfu.ca/~ssurjano/branin.html
Global optimum: :math:`f(-\\pi,12.275)=0.397887`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = 0.397887
self.minimum = np.array([-np.pi, 12.275])
self.dim = 2
        # Standard Branin domain; it contains the global minimizer (-pi, 12.275)
        self.lb = np.array([-5.0, 0.0])
        self.ub = np.array([10.0, 15.0])
self.int_var = np.array([])
self.cont_var = np.arange(0, 2)
self.info = "2-dimensional Branin function \nGlobal optimum: " +\
"f(-pi, 12.275) = 0.397887"
def eval(self, x):
"""Evaluate the Branin function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
x1 = x[0]
x2 = x[1]
t = 1 / (8 * np.pi)
s = 10
r = 6
c = 5 / np.pi
b = 5.1 / (4 * np.pi ** 2)
a = 1
term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2
term2 = s * (1 - t) * np.cos(x1)
return term1 + term2 + s
# ========================= 3-dimensional =======================
class Hartman3(OptimizationProblem):
"""Hartman 3 function
Details: http://www.sfu.ca/~ssurjano/hart3.html
Global optimum: :math:`f(0.114614,0.555649,0.852547)=-3.86278`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.dim = 3
self.lb = np.zeros(3)
self.ub = np.ones(3)
self.int_var = np.array([])
self.cont_var = np.arange(0, 3)
self.min = -3.86278
self.minimum = np.array([0.114614, 0.555649, 0.852547])
self.info = "3-dimensional Hartman function \nGlobal optimum: " +\
"f(0.114614,0.555649,0.852547) = -3.86278"
def eval(self, x):
"""Evaluate the Hartman 3 function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
alpha = np.array([1, 1.2, 3, 3.2])
A = np.array([[3.0, 10.0, 30.0], [0.1, 10.0, 35.0],
[3.0, 10.0, 30.0], [0.1, 10.0, 35.0]])
P = np.array([[0.3689, 0.1170, 0.2673],
[0.4699, 0.4387, 0.747],
[0.1091, 0.8732, 0.5547],
[0.0381, 0.5743, 0.8828]])
outer = 0
for ii in range(4):
inner = 0
for jj in range(3):
xj = x[jj]
Aij = A[ii, jj]
Pij = P[ii, jj]
inner += Aij * ((xj-Pij) ** 2)
outer += alpha[ii] * np.exp(-inner)
return -outer
# =========================6-dimensional =======================
class Hartman6(OptimizationProblem):
"""Hartman 6 function
Details: http://www.sfu.ca/~ssurjano/hart6.html
Global optimum: :math:`f(0.201,0.150,0.476,0.275,0.311,0.657)=-3.322`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self):
self.min = -3.32237
self.minimum = np.array([0.20169, 0.150011, 0.476874,
0.275332, 0.311652, 0.6573])
self.dim = 6
self.lb = np.zeros(6)
self.ub = np.ones(6)
self.int_var = np.array([])
self.cont_var = np.arange(0, 6)
self.info = "6-dimensional Hartman function \nGlobal optimum: " + \
"f(0.2016,0.15001,0.47687,0.27533,0.31165,0.657) = -3.3223"
def eval(self, x):
"""Evaluate the Hartman 6 function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
A = np.array([[10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
[0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
[3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
[17.0, 8.0, 0.05, 10.0, 0.1, 14.0]])
P = 1e-4 * np.array([[1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],
[2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],
[2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],
[4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0]])
outer = 0
for ii in range(4):
inner = 0
for jj in range(6):
xj = x[jj]
Aij = A[ii, jj]
Pij = P[ii, jj]
inner += Aij * ((xj - Pij) ** 2)
outer += alpha[ii] * np.exp(-inner)
return -outer
# ========================= n-dimensional =======================
class Rastrigin(OptimizationProblem):
"""Rastrigin function
.. math::
        f(x_1,\\ldots,x_n)=10n+\\sum_{i=1}^n (x_i^2 - 10 \\cos(2 \\pi x_i))
subject to
.. math::
-5.12 \\leq x_i \\leq 5.12
Global optimum: :math:`f(0,0,...,0)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -5.12 * np.ones(dim)
self.ub = 5.12 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Rastrigin function \n" + \
"Global optimum: f(0,0,...,0) = 0"
def eval(self, x):
"""Evaluate the Rastrigin function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return 10 * self.dim + sum(x**2 - 10 * np.cos(2 * np.pi * x))
class Ackley(OptimizationProblem):
"""Ackley function
.. math::
f(x_1,\\ldots,x_n) = -20\\exp\\left( -0.2 \\sqrt{\\frac{1}{n} \
\\sum_{j=1}^n x_j^2} \\right) -\\exp \\left( \\frac{1}{n} \
        \\sum_{j=1}^n \\cos(2 \\pi x_j) \\right) + 20 + e
subject to
.. math::
-15 \\leq x_i \\leq 20
Global optimum: :math:`f(0,0,...,0)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0
self.minimum = np.zeros(dim)
self.lb = -15 * np.ones(dim)
self.ub = 20 * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Ackley function \n" +\
"Global optimum: f(0,0,...,0) = 0"
def eval(self, x):
"""Evaluate the Ackley function at x
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
d = float(self.dim)
return -20.0 * np.exp(-0.2*np.sqrt(np.sum(x**2) / d)) - \
np.exp(np.sum(np.cos(2.0*np.pi*x)) / d) + 20 + np.exp(1)
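# Usage sketch (illustrative, not part of the original module): each problem
# exposes dim, lb, ub, minimum, and eval(), e.g.
#   problem = Ackley(dim=5)
#   print(problem.eval(problem.minimum))   # -> ~0.0, the reported optimum
#   x = problem.lb + np.random.rand(problem.dim) * (problem.ub - problem.lb)
#   print(problem.eval(x))                 # some non-negative value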
class Michalewicz(OptimizationProblem):
"""Michalewicz function
.. math::
f(x_1,\\ldots,x_n) = -\\sum_{i=1}^n \\sin(x_i) \\sin^{20}
\\left( \\frac{ix_i^2}{\\pi} \\right)
subject to
.. math::
0 \\leq x_i \\leq \\pi
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.lb = np.zeros(dim)
self.ub = np.pi * np.ones(dim)
self.int_var = np.array([])
self.cont_var = np.arange(0, dim)
self.info = str(dim) + "-dimensional Michalewicz function \n" + \
"Global optimum: ??"
def eval(self, x):
"""Evaluate the Michalewicz function at x.
:param x: Data point
:type x: numpy.array
:return: Value at x
:rtype: float
"""
self.__check_input__(x)
return -np.sum(np.sin(x) * (
np.sin(((1 + np.arange(self.dim)) * x**2)/np.pi)) ** 20)
class Levy(OptimizationProblem):
"""Levy function
Details: https://www.sfu.ca/~ssurjano/levy.html
Global optimum: :math:`f(1,1,...,1)=0`
:ivar dim: Number of dimensions
:ivar lb: Lower variable bounds
:ivar ub: Upper variable bounds
:ivar int_var: Integer variables
:ivar cont_var: Continuous variables
:ivar min: Global minimum value
:ivar minimum: Global minimizer
:ivar info: String with problem info
"""
def __init__(self, dim=10):
self.dim = dim
self.min = 0.0
self.minimum = np.ones(dim)
self.lb = -5 * np.ones(dim)
self.ub = 5 * np.ones(dim)
self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
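        # NOTE: the class appears truncated here; the info string and eval()
        # below are a sketch following the pattern of the other test problems
        # and the standard Levy formula (https://www.sfu.ca/~ssurjano/levy.html),
        # not code confirmed by this copy of the module.
        self.info = str(dim) + "-dimensional Levy function \n" + \
                    "Global optimum: f(1,1,...,1) = 0"
    def eval(self, x):
        """Evaluate the Levy function at x
        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        w = 1.0 + (x - 1.0) / 4.0
        term1 = np.sin(np.pi * w[0]) ** 2
        term2 = np.sum((w[:-1] - 1) ** 2 *
                       (1 + 10 * np.sin(np.pi * w[:-1] + 1) ** 2))
        term3 = (w[-1] - 1) ** 2 * (1 + np.sin(2 * np.pi * w[-1]) ** 2)
        return term1 + term2 + term3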
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Gaussian quadrature in 2D space
"""
import numpy as np
class GaussianQuadrature(object):
""" Abstract base class of GaussianQuadrature
"""
def __init__(self, order):
"""Constructor based on order of quadrature
Parameters
----------
order : int
Order of quadrature
"""
self.order = order
self.quad_pts_1d, self.quad_wts_1d = list(map(
np.array, np.polynomial.legendre.leggauss(order)))
self.quad_pts, self.quad_wts = self.calculate_quadrature_points_and_weights()
self.shape_at_quads = self.shape(self.quad_pts)
self.shape_derivative_at_quads = self.shape_derivative(self.quad_pts)
def shape(self, pts):
""" Abstract shape function
Parameters
----------
pts: np.ndarray
            Local coordinates of the evaluation points
"""
raise NotImplementedError()
def shape_derivative(self, pts):
""" Abstract shape derivative function
Parameters
----------
pts: np.ndarray
            Local coordinates of the evaluation points
"""
raise NotImplementedError()
def calculate_quadrature_points_and_weights(self):
""" Calculate quadrature points and weights
Parameters
----------
pts: np.ndarray
Coordinates of the nodes
Returns
-------
pts : real
quadrature points
"""
raise NotImplementedError()
def number_of_quadrature_points(self):
""" Get the number of quadrature points
Returns
-------
int
Number of quadrature points
"""
return len(self.quad_pts)
def values_at_quadrature_points(self, values):
""" Get quadrature points
Parameters
----------
values: numpy.array
values at quadrature points
Returns
-------
numpy.array
values of Gaussian quadrature points
"""
return self.shape_at_quads.dot(values)
def integrate(self, vertices, values=None):
""" Integrate values over a quad element
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes) with values.
The shape needs to be (:, 2)
values: numpy.array, optional
A value vector at vertices
            If it is not provided, the third column will be used
as values
Returns
-------
float
result of integration
"""
if values is None:
values = vertices[:, 2]
return self.quadrature_vector(vertices).dot(values)
def quadrature_vector(self, vertices):
""" Create a quadrature matrix for quadrature integration
Taking a dot product this quadrature matrix and the values
at the quadrature points will result in quadrature
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes) with values.
The shape needs to be (:, 2)
Returns
-------
numpy.array
            one-dimensional quadrature vector with one entry per node
"""
jacobian = self.jacobian_det(vertices)
return (self.quad_wts * jacobian).dot(self.shape_at_quads)
def jacobian_det(self, vertices):
""" Determinant of Jacobian at quadrature points
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (:, 2) or more columns.
The values in the first two columns will be used.
Returns
-------
numpy.array
            array of Jacobian determinants at the quadrature points
"""
jacobian = self.shape_derivative_at_quads.dot(vertices[:, :2])
return np.array([np.linalg.det(j) for j in jacobian])
def domain_size(self, vertices):
""" Abstract method to calculate the size of the domain
"""
raise NotImplementedError()
def average(self, vertices, values):
""" Calculate the average of the value over the domain.
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (:, 2) or more columns.
The values in the first two columns will be used.
        values: numpy.array
            values at the vertices
        Returns
        -------
        float
            the average of the values over the domain
"""
return (self.integrate(vertices, values)
/ self.domain_size(vertices))
class GaussianQuadratureLine2(GaussianQuadrature):
""" Gaussian quadrature on line with two end points in 2-D space
"""
def __init__(self, order):
""" Constructor
Parameters
----------
order: int
Order of Gaussian quadrature
"""
self.n_vertices = 2
super(GaussianQuadratureLine2, self).__init__(order)
def calculate_quadrature_points_and_weights(self):
return self.quad_pts_1d, self.quad_wts_1d
def shape(self, pts):
""" Shape functions
Ordering is counter-clockwise direction
Parameters
----------
pts: numpy.array
Local coordinates.
Returns
-------
numpy.array
matrix of shape function value at the given points
"""
return np.array([0.5 * (1.0 - pts), 0.5 * (1.0 + pts)]).transpose()
def shape_derivative(self, pts):
""" Derivatives of shape functions
Parameters
----------
pts: numpy.array
            Local coordinates on the reference line
        Returns
        -------
        numpy.array
            matrix of shape function derivatives with respect to xi
            at the given points
"""
return np.array([[-0.5, 0.5] for _ in range(len(pts))])
def jacobian_det(self, vertices):
""" Determinant of Jacobian at quadrature points, 1-D version
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (-1, 2) or more columns.
The values in the first two columns will be used.
Returns
-------
numpy.array
            array of Jacobian determinants at the quadrature points
"""
l = self.domain_size(vertices)
n_quad_pts = len(self.quad_pts)
return np.ones((n_quad_pts)) * (l * 0.5)
def domain_size(self, vertices):
""" Size of domian, which is the length of the line in this case
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (2, 2) or more columns.
The values in the first two columns will be used.
Returns
-------
float
the length of the line
"""
return np.linalg.norm(vertices[1, :2] - vertices[0, :2])
def average(self, vertices, values):
""" Integrate values over a quad element
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes) with values.
The shape needs to be (-1, 2)
values: numpy.array
The values at vertices
Returns
-------
average : float
            the average of the values over the line
"""
wts = self.quad_wts
val_at_pts = self.shape_at_quads.dot(values)
return np.sum(wts * val_at_pts) * 0.5
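# Usage sketch (illustrative, not part of the original module): averaging two
# nodal values over a straight segment gives their mean regardless of length.
#   line = GaussianQuadratureLine2(order=2)
#   verts = np.array([[0.0, 0.0], [3.0, 4.0]])         # a segment of length 5
#   print(line.domain_size(verts))                      # -> 5.0
#   print(line.average(verts, np.array([2.0, 4.0])))    # -> 3.0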
class GaussianQuadratureQuad4(GaussianQuadrature):
""" Gaussian Quadrature for a quadrilateral with four nodes
"""
def __init__(self, order):
""" Constructor
Parameters
----------
order: int
order of Gaussian quadrature between 2 and 5
"""
self.n_vertices = 4
super(GaussianQuadratureQuad4, self).__init__(order)
def calculate_quadrature_points_and_weights(self):
""" Calculate quadrature points and weights
"""
quad_pts = np.array([(self.quad_pts_1d[i], self.quad_pts_1d[j])
for i in range(self.order) for j in range(self.order)])
wts = self.quad_wts_1d.reshape(self.order, -1)
quad_wts = wts.dot(wts.transpose()).flatten()
return quad_pts, quad_wts
def domain_size(self, vertices):
""" Size of domain, which is the area of the quadrilateral
in this case.
The area is calculated with shoelace equation.
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (4, 2) or more columns.
The values in the first two columns will be used.
Returns
-------
        float
            the area of the quadrilateral
"""
return 0.5 * np.abs(np.dot(vertices[:, 0], np.roll(vertices[:, 1], 1))
- np.dot(np.roll(vertices[:, 0], 1), vertices[:, 1]))
def shape(self, pts):
""" Shape functions
Ordering is counter-clockwise direction
Parameters
----------
pts: numpy.array
Local coordinates. The dimension is (-1, 2)
Returns
-------
numpy.array
matrix of shape function value at (xi, eta)
"""
return np.array([0.25 * (1.0 - pts[:, 0]) * (1.0 - pts[:, 1]),
0.25 * (1.0 + pts[:, 0]) * (1.0 - pts[:, 1]),
0.25 * (1.0 + pts[:, 0]) * (1.0 + pts[:, 1]),
0.25 * (1.0 - pts[:, 0]) * (1.0 + pts[:, 1])]).transpose()
def shape_derivative(self, pts):
""" Derivatives of shape functions
Parameters
----------
pts: numpy.array
Local coordinates. The dimension is (-1, 2)
Returns
-------
numpy.array
            array of shape function derivatives with respect to xi and eta at the given points
"""
derivative = np.stack((np.array([-0.25 * (1.0 - pts[:, 1]),
0.25 * (1.0 - pts[:, 1]),
0.25 * (1.0 + pts[:, 1]),
-0.25 * (1.0 + pts[:, 1])]).transpose(),
np.array([-0.25 * (1.0 - pts[:, 0]),
-0.25 * (1.0 + pts[:, 0]),
0.25 * (1.0 + pts[:, 0]),
0.25 * (1.0 - pts[:, 0])]).transpose()))
return np.swapaxes(derivative, 0, 1)
def jacobian(self, vertices, local_coord=None):
""" Create a Jacobian matrix or matrixes at the given local
coordinates
Parameters
----------
vertices: numpy.array
coordinates of vertices (or nodes)
The shape needs to be (4, 2) or more columns.
The values in the first two columns will be used.
local_coord: numpy.array
local coordinates where a Jacobian is calculated
Returns
-------
numpy.array
Jacobian matrix
"""
if local_coord is None:
return self.shape_derivative_at_quads.dot(vertices[:, :2])
else:
return self.shape_derivative(local_coord).dot(vertices[:, :2])
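# Usage sketch (illustrative, not part of the original module): integrating the
# bilinear field f(x, y) = x + y over the unit square; the exact value is 1.0.
# Nodal values are carried in the third column of the vertex array.
#   quad = GaussianQuadratureQuad4(order=2)
#   verts = np.array([[0.0, 0.0, 0.0],
#                     [1.0, 0.0, 1.0],
#                     [1.0, 1.0, 2.0],
#                     [0.0, 1.0, 1.0]])
#   print(quad.domain_size(verts))   # -> 1.0 (shoelace area)
#   print(quad.integrate(verts))     # -> ~1.0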
class GaussianQuadratureTri3(GaussianQuadrature):
""" Gaussian Quadrature for triangles with three nodes
"""
def __init__(self, order):
""" Constructor
Parameters
----------
order: int
order of Gaussian quadrature between 2 and 5
"""
super(GaussianQuadratureTri3, self).__init__(order)
def calculate_quadrature_points_and_weights(self):
""" Calculate quadrature points and weights
"""
if self.order == 2:
quad_pts = np.array([[1. / 6., 1. / 6., 2. / 3.],
[1. / 6., 2. / 3., 1. / 6.],
[2. / 3., 1. / 6., 1. / 6.]])
            quad_wts = np.array([1. / 3., 1. / 3., 1. / 3.])
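            return quad_pts, quad_wts
        # NOTE: the module appears truncated here; quadrature rules for other
        # orders (and the remaining Tri3 methods) are not included, so this
        # guard is a sketch rather than part of the original class.
        raise NotImplementedError(
            'only order-2 quadrature points are available for Tri3 here')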
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                # elapsed polling time in minutes (3 seconds per status check)
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The resulting list of files can then be opened and concatenated by M2M_Data for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
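# Workflow sketch (illustrative, not part of the original script): the helpers
# above are typically chained as below. The return signature of M2M_URLs and
# the regex tag are assumptions for the example.
#   name, var_list = M2M_URLs('CE02SHSM', 'BUOY', 'METBK1', 'Telemetered')
#   data = M2M_Call(name, '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, '.*METBK.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)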
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
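# VELPT: single-point velocity meter on the surface buoys (BUOY/SBD1x) and
# near-surface instrument frames (NSIF/RID1x-RID2x). Reports east/north/up
# velocity in m/s plus attitude, temperature and pressure in raw instrument
# counts (deci-degrees, 0.01 degC, 0.001 dbar).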
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
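# PCO2W: pCO2 water sensor; returns the instrument thermistor temperature
# and the partial pressure of CO2 in seawater (uatm).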
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
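# PHSEN: seawater pH sensor; returns thermistor temperature and seawater pH
# (reported as a unitless value).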
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
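# SPKIR: multispectral downwelling irradiance sensor; the downwelling vector
# holds spectral irradiance in uW cm-2 nm-1.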
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
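# PRESF: seafloor pressure recorder on the multi-function nodes (MFN/MFD35);
# returns absolute seafloor pressure (dbar) and seawater temperature (degC).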
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
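# CTDBP: pumped CTD; returns temperature, practical salinity, density,
# pressure and conductivity for the buoy, NSIF and MFN packages.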
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
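# VEL3D: 3-D single-point turbulent velocity meter on the seafloor MFNs;
# returns east/north/up turbulent velocity (m/s) and seawater pressure.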
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
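# VEL3D-K: profiler-mounted variant on the CE09OSPM wire-following profiler
# (WFP01); adds heading/pitch/roll plus pressure from the co-located
# profiler CTD (int_ctd_pressure).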
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
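# PCO2A: air-sea pCO2 system on the surface buoys; returns seawater and
# atmospheric pCO2 (uatm) and the derived air-sea CO2 flux.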
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
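# PARAD: photosynthetically available radiation (PAR) sensor on the
# wire-following profiler, paired with pressure from the profiler CTD.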
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
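# OPTAA: optical absorption/attenuation spectrophotometer. Only the time
# record is requested here; the spectral channels are not mapped.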
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
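# NUTNR: nitrate sensor; returns raw and salinity-corrected nitrate
# concentration (umol/L). Note that the telemetered branches use the
# 'suna_dcl_recovered' stream name.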
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
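# MOPAK: 3-axis motion pack on the surface buoys; only the time record is
# requested for these recovered-host streams.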
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
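# METBK: bulk meteorology package on the surface buoys; returns sea-surface
# temperature/conductivity/salinity, corrected winds, air pressure and
# temperature, humidity, radiation, precipitation, and derived heat-flux and
# specific-humidity products.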
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
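#NOTE (editor sketch, assumption): each branch in this chain only records the
#dataset path plus the variable names/units; the empty arrays are presumably
#filled later from an OOI M2M request built from uframe_dataset_name, e.g.
#(hypothetical helper variables and date strings, not defined in this module):
# base_url = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'
# request_url = base_url + uframe_dataset_name + '?beginDT=' + start_time + '&endDT=' + end_time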
#FLORT - three-wavelength fluorometer (chlorophyll-a, CDOM, optical backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP - direct covariance flux package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - dissolved oxygen (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
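#NOTE (editor sketch, assumption): once a request for uframe_dataset_name has
#returned a netCDF file, the .data arrays above can be populated by matching
#each .name against the dataset variables, e.g. with xarray (illustrative
#only; 'nc_file' is a placeholder for the downloaded file path):
# import xarray as xr
# ds = xr.open_dataset(nc_file)
# for var in var_list:
#     if var.name in ds.variables:
#         var.data = ds[var.name].values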
#ADCP - acoustic Doppler current profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
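#NOTE (editor): for the adcp_velocity_earth streams above, 'bin_depths' and
#the three seawater velocity components are typically 2-D (time x depth bin),
#while heading, pitch and roll are 1-D time series, so downstream code should
#not assume every entry in var_list has the same shape.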
#WAVSS - surface wave spectra (bulk wave statistics)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - single-point velocity meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
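#NOTE (editor sketch): the VELPT attitude, temperature and pressure fields
#above are raw scaled integers (deci-degrees, 0.01 degC, 0.001 dbar); a
#minimal conversion to engineering units, assuming the arrays have already
#been filled, might look like:
# heading_deg = var_list[4].data / 10.0
# temperature_degC = var_list[7].data / 100.0
# pressure_dbar = var_list[8].data / 1000.0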
#PCO2W - seawater pCO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - Seafloor Pressure
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - CTD Pumped
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D Single Point Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - pCO2 Air-Sea
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - Absorption Spectrophotometer
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - Nitrate
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
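#CTDBP - CTD Pumped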
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
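#ADCP - Acoustic Doppler Current Profiler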
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
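#ZPLSC - Bio-acoustic Sonar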
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
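#VELPT - Single Point Velocity Meter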
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
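#VEL3D Data below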
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
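#PRESF Data below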
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
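#PHSEN Data below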
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
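#PCO2W Data below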
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
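#PARAD Data below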
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
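#NUTNR Data below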
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
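#FDCHP Data below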
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
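#FLORT Data below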
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
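#DOSTA Data below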
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
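#WAVSS_Stats Data below (wave statistics from the ADCPT-M log9 stream)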
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
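#BEP Streamed Data below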
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
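    # CE02SHSP / CE07SHSP coastal surface-piercing profiler (CSPP) recovered datastreams follow
    # (FLORT, DOSTA, CTD, PARAD, NUTNR, SPKIR, VELPT, OPTAA), mirroring the CE01ISSP/CE06ISSP blocks above.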
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
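    # CE05MOAS coastal glider platforms (CEGL###): CTD datastreams (ctdgv_m_glider_instrument),
    # telemetered and recovered_host, one pair of blocks per glider.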
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
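    # CE05MOAS glider dissolved-oxygen datastreams (DOSTA, dosta_abcdjm_glider), telemetered and recovered_host.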
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
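    # CE05MOAS glider fluorometer/backscatter datastreams (FLORT, flort_m_sample), telemetered and recovered_host.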
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
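    # Glider PARAD streams: downwelling photosynthetically available radiation (PAR)
    # with interpolated CTD pressure and position.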
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
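    # Glider ADCP velocity streams (recovered_host only in this block): bin depths,
    # heading/pitch/roll, and eastward/northward/upward seawater velocities.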
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
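# METBK hourly bulk flux products (metbk_hourly) for the surface moorings CE02SHSM,
# CE07SHSM, CE04OSSM, and CE09OSSM, telemetered and recovered_host. Each branch sets
# the UFrame dataset path, the hourly flux variable names (rain rate, buoyancy, heat,
# freshwater, latent, momentum and sensible fluxes, net longwave, specific humidity,
# stability, air and skin temperature, 10 m wind, net shortwave), empty placeholder
# arrays, and their units.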
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
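# WAVSS mean directional wave spectra (wavss_a_dcl_mean_directional) for the surface
# moorings, telemetered and recovered_host.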
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
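# WAVSS non-directional wave spectra (wavss_a_dcl_non_directional): band count,
# initial frequency, frequency spacing, and the non-directional PSD.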
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
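# WAVSS buoy motion records (wavss_a_dcl_motion): heave/north/east displacement
# arrays and magnetically corrected buoy motion time series.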
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
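# WAVSS Fourier coefficient records (wavss_a_dcl_fourier): non-directional and
# directional band setup plus the 2-D Fourier coefficient array.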
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
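# Cabled profiler branches for CE04OSPS (shallow profiler, node SF01B) and CE04OSPD
# (deep profiler, node DP01B): CTD, DOSTA, FLORT, and PHSEN streams follow.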
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
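# Profiler dissolved oxygen (DOSTA). On the shallow profiler the corrected oxygen is
# taken from the SBE43 CTD stream; the deep profiler uses the recovered optode
# instrument records.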
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
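#Each branch below only sets uframe_dataset_name and the var_list name/data/units templates;
#the empty data arrays are filled in later.  A minimal, hypothetical call pattern, assuming
#this chain lives inside a request function such as get_data(platform_name, node,
#instrument_class, method, start_time, end_time) -- the actual function name/signature may differ:
#    variables = get_data('CP01CNSM', 'BUOY', 'METBK1', 'Telemetered', start_time, end_time)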
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
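#WAVSS branches are split by data product: bulk statistics (WAVSS_Stats), mean directional (WAVSS_MeanDir), non-directional (WAVSS_NonDir), buoy motion (WAVSS_Motion), and Fourier coefficients (WAVSS_Fourier), each with Telemetered and RecoveredHost methods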
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (Telemetered)
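#PCO2A branches map the surface-water stream (pco2a_a_dcl_instrument_water): seawater and atmospheric pCO2 plus the computed air-sea CO2 flux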
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (RecoveredHost)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
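#FDCHP branches below expose only the time coordinate in this catalog; no flux variables are mapped here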
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
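#METBK hourly products: these streams use 'met_timeflx' (rather than 'time') as the time coordinate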
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
        var_list[0].data = np.array([])
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import seaborn as sns
import numpy as np
import skill_metrics as sm
import pandas as pd
import sys, getopt
from os import path
import json
mpl.rc('font', size=10)
sns.set()
dbPath = '/home/scr/Projects/CMIP_backend/src/script/data/sites-stat.json'
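# NOTE: featureName is referenced by the plotting helpers below but is assumed to be
# defined elsewhere in this script (e.g. parsed from the command line via getopt).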
def taylor(stds, rmses, coefs, labels, title):
try:
fpath = './src/script/data/taylor-' + title + '.jpg'
sm.taylor_diagram(np.array(stds), np.array(rmses), np.array(coefs),
markerLabel = labels, markerLabelColor = 'r', markerSize = 6, markerLegend = 'on',
colOBS = 'g', styleOBS = '-', markerobs = 'o',
# tickRMS = np.arange(0,25,10),
# tickSTD = np.arange(9,20,5),
# tickCOR = intervalsCOR,
showlabelsRMS = 'on', titleRMS = 'on', titleOBS = 'Fluxnet',
rmslabelformat = ':.1f')
# plt.title(title, y=1.06, fontsize='large', loc='center', horizontalalignment='center')
plt.tight_layout()
plt.savefig(fpath, format='jpg', transparent=False)
plt.close('all')
except Exception as instance:
print(instance)
def heatmap(df, title):
try:
fpath = './src/script/data/heatmap-' + title + '.jpg'
fig, ax = plt.subplots(figsize=(8,5))
fmt = lambda x,pos: round(x,2)
sns.heatmap(df, annot=True, fmt='.2f', linewidths=.5,ax=ax, cmap='YlGnBu')
# plt.yticks(rotation=90)
plt.tight_layout()
# ax.set_xlabel('PFT')
# ax.set_ylabel(title)
fig.subplots_adjust(left=.15, bottom=.15)
fig.text(0.515, 0.03, 'Observed ' + featureName + ' (kgC m-2 y-1)', ha="center", va="center")
fig.text(0.03, 0.5, featureName + ' (kgC m-2 y-1)', ha="center", va="center", rotation=90)
# plt.title(title)
plt.savefig(fpath, format='jpg', transparent=False)
plt.close('all')
except Exception as instance:
print(instance)
def plotTaylor():
with open(dbPath) as load_f:
stats = json.load(load_f)
for stat in stats:
title = featureName + '-' + stat['pft']
labels = ['Fluxnet', 'IBIS', 'Biome-BGC', 'LPJ', 'MODIS']
stds = np.concatenate((np.array(stat['avg_std'][4:]), np.array(stat['avg_std'][:4])))
rmses = np.concatenate((np.array([0]), np.array(stat['avg_rmse'][:4])))
coefs = np.concatenate((np.array([1]), np.array(stat['avg_coef'][:4])))
taylor(stds, rmses, coefs, labels, title)
print('--------finished')
def plotHeatmap():
with open(dbPath) as load_f:
stats = json.load(load_f)
meanDict = {}
stdDict = {}
coefDict = {}
r2sDict = {}
rmseDict = {}
for stat in stats:
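        # clip correlation, R2 and NSE values above 1 back to 1 before plotting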
stat['avg_coef'] = np.array(stat['avg_coef'])
stat['avg_r2'] = np.array(stat['avg_r2'])
stat['avg_nse'] = np.array(stat['avg_nse'])
stat['avg_coef'][stat['avg_coef']>1]=1
stat['avg_r2'][stat['avg_r2']>1]=1
stat['avg_nse'][stat['avg_nse']>1]=1
stat['avg_coef'] = stat['avg_coef'].tolist()
stat['avg_r2'] = stat['avg_r2'].tolist()
stat['avg_nse'] = stat['avg_nse'].tolist()
meanDict[stat['pft']] = stat['avg_mean']
stdDict[stat['pft']] = stat['avg_std']
coefDict[stat['pft']] = stat['avg_coef']
r2sDict[stat['pft']] = stat['avg_r2']
rmseDict[stat['pft']] = stat['avg_rmse']
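    # scale the means by 0.365, presumably converting daily gC m-2 d-1 to annual
    # kgC m-2 y-1 (x365/1000) to match the heatmap axis labels -- assumed unit conversion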
pd_mean = pd.DataFrame(meanDict)*.365
pd_std = pd.DataFrame(stdDict)
pd_coef = pd.DataFrame(coefDict)
pd_r2s = pd.DataFrame(r2sDict)
pd_rmse = pd.DataFrame(rmseDict)
rowIndex = {0: 'IBIS', 1: 'Biome-BGC', 2: 'LPJ', 3: 'MODIS', 4: 'Fluxnet'}
pd_mean.rename(index=rowIndex, inplace=True)
pd_std.rename(index=rowIndex, inplace=True)
pd_coef.rename(index=rowIndex, inplace=True)
pd_r2s.rename(index=rowIndex, inplace=True)
pd_rmse.rename(index=rowIndex, inplace=True)
heatmap(pd_mean, featureName + '-mean')
heatmap(pd_std, featureName + '-std')
heatmap(pd_coef, featureName + '-coef')
heatmap(pd_r2s, featureName + '-R2')
heatmap(pd_rmse, featureName + '-rmse')
print('--------finished')
def plotBar():
with open(dbPath) as load_f:
stats = json.load(load_f)
meanDict = {}
stdDict = {}
coefDict = {}
r2sDict = {}
rmseDict = {}
for stat in stats:
        stat['avg_coef'] = np.array(stat['avg_coef'])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 08:55:24 2021
@author: llothar
"""
from sens_tape import tape
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use(['science','no-latex'])
from shapely.geometry import Polygon
from shapely.geometry import LineString
from shapely.ops import unary_union
from shapely.ops import unary_union, polygonize
from sklearn.neighbors import RadiusNeighborsRegressor
from sklearn.neighbors import KNeighborsRegressor
from statistics_module import stats
def myr2(x,y,data_x, data_y):
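    # Squared residual between y and the piecewise-linear interpolation of
    # (data_x, data_y) evaluated at x; assumes data_x has points on both sides of x.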
try:
x1 = np.max(data_x[data_x < x])
x2 = np.min(data_x[data_x > x])
loc1 = np.where(data_x == x1)
loc2 = np.where(data_x == x2)
y1 = data_y[loc1][-1]
y2 = data_y[loc2][0]
m = (y1-y2)/(x1-x2)
b = (x1*y2 - x2*y1)/(x1-x2)
y_inter = m * x + b
        return np.power(y-y_inter, 2)
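    # (assumed) the excerpt ends mid-function; without a handler the bare try-block is a
    # syntax error, so fall back to NaN when x lies outside the sampled range
    except Exception:
        return np.nan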
import numpy as np
from decimer_segmentation.complete_structure import *
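# Unit tests for the polygon/bounding-box helpers in decimer_segmentation's
# complete_structure module.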
def test_get_bounding_box_center():
# Determine the center of a given polygon bounding box
test_bbox = np.array([[1, 1], [2, 1], [3, 1], [3, 0], [2, 0], [1, 0]])
expected_result = np.array([2, 0.5])
actual_result = get_bounding_box_center(test_bbox)
for index in range(len(expected_result)):
assert expected_result[index] == actual_result[index]
def test_get_edge_line():
    # Returns the slope and intercept of the linear function through two points in 2D space
test_linenode1 = [1, 6]
test_linenode2 = [5, 8]
expected_result = [0.5, 5.5]
actual_result = get_edge_line(test_linenode1, test_linenode2)
for index in range(len(expected_result)):
assert expected_result[index] == actual_result[index]
def test_get_euklidian_distance():
    # Calculates the Euclidean distance between two given points in 2D space
test_distancepoint1 = [1, 6]
test_distancepoint2 = [5, 8]
expected_result = 4.47213595499958
actual_result = get_euklidian_distance(test_distancepoint1, test_distancepoint2)
assert expected_result == actual_result
def test_set_x_range():
# For the contour-based expansion, non-white pixels on the contours of the original polygon bounding box are detected
test_distance = 3
test_eukl_distance = 4
test_image_array = np.array([[1,5]])
expected_result = [2.5, 2.75, 1., 0.25, 0.75, 0., 2., 2.25, 1.5, 1.75, 1.25, 0.5]
actual_result = set_x_range(test_distance, test_eukl_distance, test_image_array)
assert set(expected_result) == set(actual_result)
def test_get_next_pixel_to_check():
# Returns the next pixel to check in the image
test_bounding_box = np.array([[1,5], [2,4]])
test_node_index = 1
test_step = 4
test_image_shape = [2,4,6]
expected_result = (3, 1)
actual_result = get_next_pixel_to_check(test_bounding_box, test_node_index, test_step, test_image_shape)
for index in range(len(expected_result)):
assert expected_result[index] == actual_result[index]
def test_adapt_x_values():
# Returns a bounding box where the nodes are altered depending on their relative position to bounding box centre
    test_bounding_box = np.array([[1,5], [2,4]])
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import itertools
import numpy as np
import pytest
import cirq
from cirq.protocols.act_on_protocol_test import DummyActOnArgs
from cirq.testing import (
EqualsTester,
assert_allclose_up_to_global_phase,
)
_bools = (False, True)
_paulis = (cirq.X, cirq.Y, cirq.Z)
def _assert_not_mirror(gate) -> None:
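    # A single-qubit Clifford must act as a proper rotation of the Bloch sphere:
    # the images of X, Y, Z should still form a right-handed frame; otherwise the
    # map is a reflection ("mirror").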
trans_x = gate.transform(cirq.X)
trans_y = gate.transform(cirq.Y)
trans_z = gate.transform(cirq.Z)
right_handed = (
trans_x.flip ^ trans_y.flip ^ trans_z.flip ^ (trans_x.to.relative_index(trans_y.to) != 1)
)
assert right_handed, 'Mirrors'
def _assert_no_collision(gate) -> None:
trans_x = gate.transform(cirq.X)
trans_y = gate.transform(cirq.Y)
trans_z = gate.transform(cirq.Z)
assert trans_x.to != trans_y.to, 'Collision'
assert trans_y.to != trans_z.to, 'Collision'
assert trans_z.to != trans_x.to, 'Collision'
def _all_rotations():
for (
pauli,
flip,
) in itertools.product(_paulis, _bools):
yield cirq.PauliTransform(pauli, flip)
def _all_rotation_pairs():
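    # Enumerate all valid (X-image, Z-image) pairs: the two target Paulis must be
    # distinct, giving 6 axis choices x 4 sign choices = 24 single-qubit Cliffords.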
for px, flip_x, pz, flip_z in itertools.product(_paulis, _bools, _paulis, _bools):
if px == pz:
continue
yield cirq.PauliTransform(px, flip_x), cirq.PauliTransform(pz, flip_z)
def _all_clifford_gates():
for trans_x, trans_z in _all_rotation_pairs():
yield cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
@pytest.mark.parametrize('pauli,flip_x,flip_z', itertools.product(_paulis, _bools, _bools))
def test_init_value_error(pauli, flip_x, flip_z):
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_xz_map((pauli, flip_x), (pauli, flip_z))
@pytest.mark.parametrize('trans_x,trans_z', _all_rotation_pairs())
def test_init_from_xz(trans_x, trans_z):
gate = cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
assert gate.transform(cirq.X) == trans_x
assert gate.transform(cirq.Z) == trans_z
_assert_not_mirror(gate)
_assert_no_collision(gate)
@pytest.mark.parametrize(
'trans1,trans2,from1',
(
(trans1, trans2, from1)
for trans1, trans2, from1 in itertools.product(_all_rotations(), _all_rotations(), _paulis)
if trans1.to != trans2.to
),
)
def test_init_from_double_map_vs_kwargs(trans1, trans2, from1):
from2 = cirq.Pauli.by_relative_index(from1, 1)
from1_str, from2_str = (str(frm).lower() + '_to' for frm in (from1, from2))
gate_kw = cirq.SingleQubitCliffordGate.from_double_map(**{from1_str: trans1, from2_str: trans2})
gate_map = cirq.SingleQubitCliffordGate.from_double_map({from1: trans1, from2: trans2})
# Test initializes the same gate
assert gate_kw == gate_map
# Test initializes what was expected
assert gate_map.transform(from1) == trans1
assert gate_map.transform(from2) == trans2
_assert_not_mirror(gate_map)
_assert_no_collision(gate_map)
@pytest.mark.parametrize(
'trans1,from1',
((trans1, from1) for trans1, from1 in itertools.product(_all_rotations(), _paulis)),
)
def test_init_from_double_invalid(trans1, from1):
from2 = cirq.Pauli.by_relative_index(from1, 1)
# Test throws on invalid arguments
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({from1: trans1, from2: trans1})
@pytest.mark.parametrize('trans,frm', itertools.product(_all_rotations(), _paulis))
def test_init_from_single_map_vs_kwargs(trans, frm):
from_str = str(frm).lower() + '_to'
# pylint: disable=unexpected-keyword-arg
gate_kw = cirq.SingleQubitCliffordGate.from_single_map(**{from_str: trans})
gate_map = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate_kw == gate_map
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to != frm
),
)
def test_init_90rot_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to one gate
assert len(gate.decompose_rotation()) == 1
# Check that this is a 90 degree rotation gate
assert (
gate.merged_with(gate).merged_with(gate).merged_with(gate) == cirq.SingleQubitCliffordGate.I
)
# Check that flipping the transform produces the inverse rotation
trans_rev = cirq.PauliTransform(trans.to, not trans.flip)
gate_rev = cirq.SingleQubitCliffordGate.from_single_map({frm: trans_rev})
assert gate**-1 == gate_rev
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to == frm and trans.flip
),
)
def test_init_180rot_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to one gate
assert len(gate.decompose_rotation()) == 1
# Check that this is a 180 degree rotation gate
assert gate.merged_with(gate) == cirq.SingleQubitCliffordGate.I
@pytest.mark.parametrize(
'trans,frm',
(
(trans, frm)
for trans, frm in itertools.product(_all_rotations(), _paulis)
if trans.to == frm and not trans.flip
),
)
def test_init_ident_from_single(trans, frm):
gate = cirq.SingleQubitCliffordGate.from_single_map({frm: trans})
assert gate.transform(frm) == trans
_assert_not_mirror(gate)
_assert_no_collision(gate)
# Check that it decomposes to zero gates
assert len(gate.decompose_rotation()) == 0
# Check that this is an identity gate
assert gate == cirq.SingleQubitCliffordGate.I
@pytest.mark.parametrize(
'pauli,sqrt,expected',
(
(cirq.X, False, cirq.SingleQubitCliffordGate.X),
(cirq.Y, False, cirq.SingleQubitCliffordGate.Y),
(cirq.Z, False, cirq.SingleQubitCliffordGate.Z),
(cirq.X, True, cirq.SingleQubitCliffordGate.X_sqrt),
(cirq.Y, True, cirq.SingleQubitCliffordGate.Y_sqrt),
(cirq.Z, True, cirq.SingleQubitCliffordGate.Z_sqrt),
),
)
def test_init_from_pauli(pauli, sqrt, expected):
gate = cirq.SingleQubitCliffordGate.from_pauli(pauli, sqrt=sqrt)
assert gate == expected
def test_pow():
assert cirq.SingleQubitCliffordGate.X**-1 == cirq.SingleQubitCliffordGate.X
assert cirq.SingleQubitCliffordGate.H**-1 == cirq.SingleQubitCliffordGate.H
assert cirq.SingleQubitCliffordGate.X_sqrt == cirq.SingleQubitCliffordGate.X**0.5
assert cirq.SingleQubitCliffordGate.Y_sqrt == cirq.SingleQubitCliffordGate.Y**0.5
assert cirq.SingleQubitCliffordGate.Z_sqrt == cirq.SingleQubitCliffordGate.Z**0.5
assert cirq.SingleQubitCliffordGate.X_nsqrt == cirq.SingleQubitCliffordGate.X**-0.5
assert cirq.SingleQubitCliffordGate.Y_nsqrt == cirq.SingleQubitCliffordGate.Y**-0.5
assert cirq.SingleQubitCliffordGate.Z_nsqrt == cirq.SingleQubitCliffordGate.Z**-0.5
assert cirq.SingleQubitCliffordGate.X_sqrt**-1 == cirq.SingleQubitCliffordGate.X_nsqrt
assert cirq.inverse(cirq.SingleQubitCliffordGate.X_nsqrt) == (
cirq.SingleQubitCliffordGate.X_sqrt
)
with pytest.raises(TypeError):
_ = cirq.SingleQubitCliffordGate.Z**0.25
def test_init_from_quarter_turns():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 0),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 8),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, -4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, -4),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, -4),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Y, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 1),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 5),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, 9),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.Z, -3),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 2),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 6),
)
eq.add_equality_group(
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 3),
cirq.SingleQubitCliffordGate.from_quarter_turns(cirq.X, 7),
)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_init_from_quarter_turns_reconstruct(gate):
new_gate = functools.reduce(
cirq.SingleQubitCliffordGate.merged_with,
(
cirq.SingleQubitCliffordGate.from_quarter_turns(pauli, qt)
for pauli, qt in gate.decompose_rotation()
),
cirq.SingleQubitCliffordGate.I,
)
assert gate == new_gate
def test_init_invalid():
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map()
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map({})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False)}, y_to=(cirq.Y, False)
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False), cirq.Y: (cirq.Y, False)}
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map()
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map({cirq.X: (cirq.X, False)})
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_double_map(x_to=(cirq.X, False))
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.Y, False), cirq.Y: (cirq.Z, False), cirq.Z: (cirq.X, False)}
)
with pytest.raises(ValueError):
cirq.SingleQubitCliffordGate.from_single_map(
{cirq.X: (cirq.X, False), cirq.Y: (cirq.X, False)}
)
def test_eq_ne_and_hash():
eq = EqualsTester()
for trans_x, trans_z in _all_rotation_pairs():
gate_gen = lambda: cirq.SingleQubitCliffordGate.from_xz_map(trans_x, trans_z)
eq.make_equality_group(gate_gen)
@pytest.mark.parametrize(
'gate,rep',
(
(cirq.SingleQubitCliffordGate.I, 'cirq.SingleQubitCliffordGate(X:+X, Y:+Y, Z:+Z)'),
(cirq.SingleQubitCliffordGate.H, 'cirq.SingleQubitCliffordGate(X:+Z, Y:-Y, Z:+X)'),
(cirq.SingleQubitCliffordGate.X, 'cirq.SingleQubitCliffordGate(X:+X, Y:-Y, Z:-Z)'),
(cirq.SingleQubitCliffordGate.X_sqrt, 'cirq.SingleQubitCliffordGate(X:+X, Y:+Z, Z:-Y)'),
),
)
def test_repr(gate, rep):
assert repr(gate) == rep
@pytest.mark.parametrize(
'gate,trans_y',
(
(cirq.SingleQubitCliffordGate.I, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.H, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.X, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.Y, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Z, (cirq.Y, True)),
(cirq.SingleQubitCliffordGate.X_sqrt, (cirq.Z, False)),
(cirq.SingleQubitCliffordGate.X_nsqrt, (cirq.Z, True)),
(cirq.SingleQubitCliffordGate.Y_sqrt, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Y_nsqrt, (cirq.Y, False)),
(cirq.SingleQubitCliffordGate.Z_sqrt, (cirq.X, True)),
(cirq.SingleQubitCliffordGate.Z_nsqrt, (cirq.X, False)),
),
)
def test_y_rotation(gate, trans_y):
assert gate.transform(cirq.Y) == trans_y
@pytest.mark.parametrize(
'gate,gate_equiv',
(
(cirq.SingleQubitCliffordGate.I, cirq.X**0),
(cirq.SingleQubitCliffordGate.H, cirq.H),
(cirq.SingleQubitCliffordGate.X, cirq.X),
(cirq.SingleQubitCliffordGate.Y, cirq.Y),
(cirq.SingleQubitCliffordGate.Z, cirq.Z),
(cirq.SingleQubitCliffordGate.X_sqrt, cirq.X**0.5),
(cirq.SingleQubitCliffordGate.X_nsqrt, cirq.X**-0.5),
(cirq.SingleQubitCliffordGate.Y_sqrt, cirq.Y**0.5),
(cirq.SingleQubitCliffordGate.Y_nsqrt, cirq.Y**-0.5),
(cirq.SingleQubitCliffordGate.Z_sqrt, cirq.Z**0.5),
(cirq.SingleQubitCliffordGate.Z_nsqrt, cirq.Z**-0.5),
),
)
def test_decompose(gate, gate_equiv):
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(gate(q0)).unitary()
mat_check = cirq.Circuit(
gate_equiv(q0),
).unitary()
assert_allclose_up_to_global_phase(mat, mat_check, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize(
'gate,gate_equiv',
(
(cirq.SingleQubitCliffordGate.I, cirq.X**0),
(cirq.SingleQubitCliffordGate.H, cirq.H),
(cirq.SingleQubitCliffordGate.X, cirq.X),
(cirq.SingleQubitCliffordGate.Y, cirq.Y),
(cirq.SingleQubitCliffordGate.Z, cirq.Z),
(cirq.SingleQubitCliffordGate.X_sqrt, cirq.X**0.5),
(cirq.SingleQubitCliffordGate.X_nsqrt, cirq.X**-0.5),
(cirq.SingleQubitCliffordGate.Y_sqrt, cirq.Y**0.5),
(cirq.SingleQubitCliffordGate.Y_nsqrt, cirq.Y**-0.5),
(cirq.SingleQubitCliffordGate.Z_sqrt, cirq.Z**0.5),
(cirq.SingleQubitCliffordGate.Z_nsqrt, cirq.Z**-0.5),
),
)
def test_known_matrix(gate, gate_equiv):
assert cirq.has_unitary(gate)
mat = cirq.unitary(gate)
mat_check = cirq.unitary(gate_equiv)
assert_allclose_up_to_global_phase(mat, mat_check, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_inverse(gate):
assert gate == cirq.inverse(cirq.inverse(gate))
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_inverse_matrix(gate):
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(gate(q0)).unitary()
mat_inv = cirq.Circuit(cirq.inverse(gate)(q0)).unitary()
assert_allclose_up_to_global_phase(mat, mat_inv.T.conj(), rtol=1e-7, atol=1e-7)
def test_commutes_notimplemented_type():
with pytest.raises(TypeError):
cirq.commutes(cirq.SingleQubitCliffordGate.X, 'X')
assert cirq.commutes(cirq.SingleQubitCliffordGate.X, 'X', default='default') == 'default'
with pytest.raises(TypeError):
cirq.commutes(cirq.CliffordGate.X, 'X')
assert cirq.commutes(cirq.CliffordGate.X, 'X', default='default') == 'default'
@pytest.mark.parametrize(
'gate,other', itertools.product(_all_clifford_gates(), _all_clifford_gates())
)
def test_commutes_single_qubit_gate(gate, other):
q0 = cirq.NamedQubit('q0')
gate_op = gate(q0)
other_op = other(q0)
mat = cirq.Circuit(
gate_op,
other_op,
).unitary()
mat_swap = cirq.Circuit(
other_op,
gate_op,
).unitary()
commutes = cirq.commutes(gate, other)
commutes_check = cirq.allclose_up_to_global_phase(mat, mat_swap)
assert commutes == commutes_check
# Test after switching order
mat_swap = cirq.Circuit(
gate.equivalent_gate_before(other)(q0),
gate_op,
).unitary()
assert_allclose_up_to_global_phase(mat, mat_swap, rtol=1e-7, atol=1e-7)
@pytest.mark.parametrize('gate', _all_clifford_gates())
def test_parses_single_qubit_gate(gate):
assert gate == cirq.read_json(json_text=(cirq.to_json(gate)))
@pytest.mark.parametrize(
'gate,pauli,half_turns',
itertools.product(_all_clifford_gates(), _paulis, (1.0, 0.25, 0.5, -0.5)),
)
def test_commutes_pauli(gate, pauli, half_turns):
# TODO(#4328) cirq.X**1 should be _PauliX instead of XPowGate
pauli_gate = pauli if half_turns == 1 else pauli**half_turns
q0 = cirq.NamedQubit('q0')
mat = cirq.Circuit(
gate(q0),
pauli_gate(q0),
).unitary()
mat_swap = cirq.Circuit(
pauli_gate(q0),
gate(q0),
).unitary()
commutes = cirq.commutes(gate, pauli_gate)
    commutes_check = np.allclose(mat, mat_swap)
from CHECLabPy.plotting.setup import Plotter
from CHECLabPy.plotting.camera import CameraImage
from CHECLabPy.utils.files import create_directory
from CHECLabPy.utils.mapping import get_ctapipe_camera_geometry
from sstcam_sandbox import get_plot, get_data
from os.path import join
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import pandas as pd
import warnings
from CHECOnsky.calib import obtain_cleaning_mask
from CHECLabPy.calib import TimeCalibrator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from IPython import embed
def colorbar(mappable, label):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
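    # two axes are appended below; the first (discarded) one appears to act only as a
    # spacer so the colorbar sits clear of the camera image -- assumed intent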
_ = divider.append_axes("right", size="10%", pad=0.15)
cax = divider.append_axes("right", size="10%", pad=0.15)
return fig.colorbar(mappable, label=label, cax=cax, aspect=20)
class CameraMovie(Plotter):
def __init__(self, mapping, output_path):
super().__init__()
self.fig = plt.figure(figsize=(8, 3))
self.ax_goldfish = self.fig.add_axes([0, 0, 0.4, 1])
self.ax_image = self.fig.add_axes([0.4, 0, 0.4, 1])
self.ax_cb = self.fig.add_axes([0.68, 0, 0.15, 1])
self.ax_image.patch.set_alpha(0)
self.ax_cb.patch.set_alpha(0)
self.ax_cb.axis('off')
self.ci_image = CameraImage.from_mapping(mapping, ax=self.ax_image)
self.ci_image.add_colorbar(
"Pixel Amplitude (p.e.)", ax=self.ax_cb, pad=-0.5
)
self.ci_goldfish = CameraImage.from_mapping(mapping, ax=self.ax_goldfish)
self.output_path = output_path
self.source_point_image = None
self.source_point_goldfish = None
self.source_label_image = None
self.source_label_goldfish = None
self.alpha_line = None
self.timestamp = None
self.iframe = 0
def set_source_position(self, x_src, y_src):
offset = 0.004
if self.source_point_image is None:
self.source_point_image, = self.ax_image.plot(
x_src, y_src, 'x', c='red'
)
self.source_label_image = self.ax_image.text(
x_src+offset, y_src+offset, "Mrk421", color='red', size=10
)
else:
self.source_point_image.set_xdata(x_src)
self.source_point_image.set_ydata(y_src)
self.source_label_image.set_position((x_src+offset, y_src+offset))
if self.source_point_goldfish is None:
self.source_point_goldfish, = self.ax_goldfish.plot(
x_src, y_src, 'x', c='red'
)
self.source_label_goldfish = self.ax_goldfish.text(
x_src+offset, y_src+offset, "Mrk421", color='red', size=10
)
else:
self.source_point_goldfish.set_xdata(x_src)
self.source_point_goldfish.set_ydata(y_src)
self.source_label_goldfish.set_position((x_src+offset, y_src+offset))
def set_timestamp(self, timestamp):
timestamp_str = str(timestamp)
timestamp_len = len(timestamp_str)
missing = 29 - timestamp_len
timestamp_str += "0" * missing
if self.timestamp is None:
self.timestamp = self.fig.text(
0.4, -0.1, timestamp_str, horizontalalignment='center', size=12
)
else:
self.timestamp.set_text(timestamp_str)
def set_image(self, image, min_=None, max_=None):
self.ci_image.image = image
self.ci_image.set_limits_minmax(min_, max_)
def set_goldfish(self, slice, min_=None, max_=None):
self.ci_goldfish.image = slice
self.ci_goldfish.set_limits_minmax(min_, max_)
def set_alpha_line(self, cog_x, cog_y, psi):
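        # Draw a dashed line through the image centroid (cog_x, cog_y) at orientation
        # angle psi -- the shower major axis used for the alpha-angle overlay.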
y_min, y_max = self.ax_image.get_ylim()
x_min = cog_x - (cog_y - y_min) / np.tan(psi)
x_max = cog_x - (cog_y - y_max) / np.tan(psi)
if self.alpha_line is None:
self.alpha_line, = self.ax_image.plot(
[x_min, x_max], [y_min, y_max], ls="--", c='red'
)
else:
self.alpha_line.set_xdata([x_min, x_max])
self.alpha_line.set_ydata([y_min, y_max])
def save_frame(self):
path = self.output_path.format(self.iframe)
self.fig.savefig(path, bbox_inches='tight')
self.iframe += 1
def main():
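    # Render one frame per (sub-sampled) event: a cleaned dl1 camera image beside a
    # "goldfish" waveform view, with the Mrk421 source position and major axis overlaid.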
path = get_data("d190717_alpha/wobble.h5")
with pd.HDFStore(path, mode='r') as store:
df = store['data'].loc[::4]
mapping = store['mapping']
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
mapping.metadata = store.get_storer('mapping').attrs.metadata
tc = TimeCalibrator()
geom = get_ctapipe_camera_geometry(mapping)
n_row = df.index.size
p_camera = CameraMovie(mapping, get_plot(
"d190717_alpha/wobble_animation_goldfish/frames/{:04d}.png"
))
for _, row in tqdm(df.iterrows(), total=n_row):
timestamp = row['timestamp']
iobs = row['iobs']
iev = row['iev']
x_src = row['x_src']
y_src = row['y_src']
dl1 = row['dl1'].values
time = row['dl1_pulse_time'].values
r1 = row['r1']
x_cog = row['x_cog']
y_cog = row['y_cog']
psi = row['psi']
p_camera.set_source_position(x_src, y_src)
n_pixels, n_samples = r1.shape
shifted = tc(r1)
mask = obtain_cleaning_mask(geom, dl1, time)
if not mask.any():
msg = f"No pixels survived cleaning for: RUN {iobs} IEV {iev}"
print(msg)
continue
# raise ValueError(msg)
        dl1_ma = np.ma.masked_array(dl1, mask=~mask)
"""
Dedalus script for Balanced Hasegawa-Wakatani equations
From Majda PoP 2018
This script can be ran serially or in parallel, and uses the built-in analysis
framework to save data snapshots in HDF5 files. The `merge_procs` command can
be used to merge distributed analysis sets from parallel runs, and the
`plot_slices.py` script can be used to plot the snapshots.
To run, merge, and plot using 4 processes, for instance, you could use:
$ mpiexec -n 4 python3 rayleigh_benard.py
$ mpiexec -n 4 python3 -m dedalus merge_procs snapshots
$ mpiexec -n 4 python3 plot_slices.py snapshots/*.h5
This script can restart the simulation from the last save of the original
output to extend the integration. This requires that the output files from
the original simulation are merged, and the last is symlinked or copied to
`restart.h5`.
To run the original example and the restart, you could use:
$ mpiexec -n 4 python3 rayleigh_benard.py
$ mpiexec -n 4 python3 -m dedalus merge_procs snapshots
$ ln -s snapshots/snapshots_s2.h5 restart.h5
$ mpiexec -n 4 python3 rayleigh_benard.py
The simulations should take a few process-minutes to run.
"""
import numpy as np
from mpi4py import MPI
import time
import pathlib
from dedalus import public as de
from dedalus.extras import flow_tools
from dedalus.core import operators
import h5py
import logging
logger = logging.getLogger(__name__)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Parameters
Lx, Ly = (2*np.pi, 2*np.pi)
Nx, Ny = (2048, 2048)
Beta = 8.0
Viscosity = 1.5e-8
Friction = 1.2e-3
# Create bases and domain
x_basis = de.Fourier('x', Nx, interval=(-Lx/2, Lx/2), dealias=3/2)
y_basis = de.Fourier('y', Ny, interval=(-Ly/2, Ly/2), dealias=3/2)
domain = de.Domain([x_basis, y_basis], grid_dtype=np.float64)
# Set up k grid
nx_global = np.array(list(range(Nx//2)))
ny_global = np.array(list(range(Ny//2))+list(range(-Ny//2+1,0)))
nyg_global, nxg_global = np.meshgrid(ny_global, nx_global)
n2_global = nyg_global**2 + nxg_global**2
ky_global, kx_global = np.meshgrid(2*np.pi*ny_global/Ly, 2*np.pi*nx_global/Lx)
k2_global = kx_global**2+ky_global**2
# Set up correlation function for the random forcing
corr_func = np.logical_and(n2_global >= 14**2, n2_global <= 15**2)*1.0
# Inverse laplacian
invlap = np.zeros(k2_global.shape)
invlap[k2_global>0] = 1.0 / k2_global[k2_global>0]
# Compute (lap^-1 C) (0), which is the second line
invlap_corr_func = invlap * corr_func
invlap_corr_total = np.sum(invlap_corr_func[nxg_global>0]*2) + np.sum(invlap_corr_func[nxg_global==0])
# Adjustment factor to match Bouchet choice -- old choice, incorrect
#corr_func = (corr_func / invlap_corr_total) / (0.5 * Lx * Ly)
# Adjustment factor to match Bouchet choice -- new choice, correct and leads to <v^2/2> = 1
corr_func = (corr_func / invlap_corr_total)
# Set up sampling pattern for the random forcing function
cshape = domain.dist.coeff_layout.local_shape(scales=1)
cslice = domain.dist.coeff_layout.slices(scales=1)
# Have to treat nx=0 case carefully, so split out the nx=0 forced modes
nxg_local = nxg_global[cslice]
nyg_local = nyg_global[cslice]
corr_func_local = corr_func[cslice]
forced_local = corr_func_local > 1e-16
# Check if any modes are forced at all
forced_local_nxp = np.logical_and(forced_local, nxg_local > 0)
num_forced_nxp = np.sum(forced_local_nxp)
if num_forced_nxp > 0:
forced_where = np.where(forced_local_nxp)
forcing_amps = np.sqrt(corr_func_local[forced_where])
# In the nx=0 case, split into ny<0 and ny>0 modes
forced_local_nx0 = np.logical_and(forced_local, nxg_local == 0)
num_forced_nx0 = np.sum(forced_local_nx0)//2
if num_forced_nx0 > 0:
forced_where_nx0p = np.where(np.logical_and(forced_local_nx0, nyg_local > 0))
forced_where_nx0m = np.where(np.logical_and(forced_local_nx0, nyg_local < 0))
forcing_amps_nx0 = np.sqrt(corr_func_local[forced_where_nx0p])
# Set up empty array for random force
randomforce = np.zeros(cshape, dtype=complex)
rng = np.random.default_rng()
def forcing(deltaT):
if num_forced_nxp > 0:
noise_r = rng.standard_normal(num_forced_nxp)
noise_i = rng.standard_normal(num_forced_nxp)
randomforce[forced_where] = (noise_r+1j*noise_i)*forcing_amps
if num_forced_nx0 > 0:
noise_r = rng.standard_normal(num_forced_nx0)
noise_i = rng.standard_normal(num_forced_nx0)
randomforce[forced_where_nx0p] = (noise_r+1j*noise_i)*forcing_amps_nx0
randomforce[forced_where_nx0m] = (noise_r-1j*noise_i)*forcing_amps_nx0
return randomforce/np.sqrt(deltaT)
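# Hedged sketch (not part of the original script): the 1/sqrt(deltaT) factor makes
# the discrete forcing behave like white noise, i.e. the variance of the forcing
# accumulated over a fixed interval is independent of the timestep. A quick
# standalone check of that scaling, assuming at least one forced mode on this rank:
# if num_forced_nxp > 0:
#     f_small = np.abs(forcing(1e-4).copy())**2
#     f_large = np.abs(forcing(4e-4).copy())**2
#     # mean-square amplitudes should differ by roughly the timestep ratio (~4x)
#     print(np.mean(f_small[forced_where]) / np.mean(f_large[forced_where]))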
forcing_func = operators.GeneralFunction(domain, 'c', forcing, args=[])
# Set up problem equations
problem = de.IVP(domain, variables=['psi', 'vx', 'vy', 'q'])
problem.parameters['Bt'] = Beta
problem.parameters['Mu'] = Viscosity
problem.parameters['Al'] = Friction
problem.parameters['Sq2Al'] = np.sqrt(2.0*Friction)
problem.parameters['Ly'] = Ly
problem.parameters['Lx'] = Lx
problem.parameters['forcing_func'] = forcing_func
problem.substitutions['Lap(A)'] = "dx(dx(A)) + dy(dy(A))"
problem.add_equation("dt(q) + Mu*Lap(Lap(q)) + Al*q - Bt*dy(psi) = -(vx*dx(q) + vy*dy(q)) + Sq2Al*forcing_func")
problem.add_equation("q - Lap(psi) = 0", condition="(nx!=0) or (ny!=0)")
problem.add_equation("psi = 0", condition="(nx==0) and (ny==0)")
problem.add_equation("vy - dx(psi) = 0")
problem.add_equation("vx + dy(psi) = 0")
# Build solver
solver = problem.build_solver(de.timesteppers.MCNAB2)
logger.info('Solver built')
timestep = 2e-5
max_timestep = 0.2
#snapshotStep = 0.0005
snapshotStep = 50.0
# Initial conditions or restart
if not pathlib.Path('restart.h5').exists():
q = solver.state['q']
#ff = forcing(1.0)
q['c'] = forcing(1.0)*np.sqrt(2.0*Friction) + np.logical_and(nxg_local==4, nyg_local==0)*1.5
# Timestepping and output
dt = timestep
stop_sim_time = 3600.1
fh_mode = 'overwrite'
else:
# Restart
#write, last_dt = solver.load_state('restart.h5', -1)
logger.info("Loading solver state from: restart.h5")
with h5py.File('restart.h5', mode='r') as f:
write = f['scales']['write_number'][-1]
last_dt = f['scales']['timestep'][-1]
solver.iteration = solver.initial_iteration = f['scales']['iteration'][-1]
solver.sim_time = solver.initial_sim_time = f['scales']['sim_time'][-1]
logger.info("Loading iteration: {}".format(solver.iteration))
logger.info("Loading write: {}".format(write))
logger.info("Loading sim time: {}".format(solver.sim_time))
logger.info("Loading timestep: {}".format(last_dt))
q = solver.state['q']
psi = solver.state['psi']
vx = solver.state['vx']
vy = solver.state['vy']
last_q = f['tasks']['q'][-1,:,:]
# Note: I'm not really sure what the conventions for the DFT used in dedalus are, so I use numpy instead
np_kx = np.fft.fftfreq(Nx, Lx/Nx/2/np.pi)
np_ky = np.fft.rfftfreq(Ny, Ly/Ny/2/np.pi)
np_kxg, np_kyg = np.meshgrid(np_kx, np_ky, indexing='ij')
np_k2 = np_kxg**2 + np_kyg**2
invlap_np = np.zeros(np_k2.shape)
invlap_np[np_k2>0] = -1.0 / np_k2[np_k2>0]
last_qfft = np.fft.rfft2(last_q)
last_psifft = invlap_np*last_qfft
last_vxfft = 1j*np_kyg*last_psifft
last_vyfft = -1j*np_kxg*last_psifft
last_psi = np.fft.irfft2(last_psifft)
last_vx = np.fft.irfft2(last_vxfft)
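# Hedged consistency sketch (assumes the numpy FFT layout above): in spectral space
# the reconstruction should satisfy q_hat = -k^2 * psi_hat away from the mean mode,
# so the chosen DFT convention can be verified directly:
# assert np.allclose(last_qfft[np_k2 > 0], (-np_k2 * last_psifft)[np_k2 > 0])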
import csv
import cv2
import numpy as np
import ntpath
import time
import pickle
import sklearn
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Convolution2D, Dropout
######################################################################################
### Settings
# use_datasets = [0,1,2] # Array to select which datasets to process
use_datasets = [3] # Array to select which datasets to process
nb_epochs = 7 # Number of epochs for neural network training
batch_sz = 32 # Batch size for neural network
test_sz = 0.20 # Fraction of images to use for test set
steer_corr = 0.00 # Steering correction value (left, right camera)
dropout_keep_rate = 0.7 # Dropout rate to keep neurons
### List of available datasets
datasets = np.array([ 'data/Udacity/driving_log.csv', \
'data/T1_Regular/driving_log.csv', \
'data/T1_OtherDir/driving_log.csv', \
'data/driving_log.csv'], \
dtype='str')
datasets = datasets[use_datasets]
################################################################################
### Function to read in image files and store them as pickle file
def read_csvs(datasets, steer_corr):
### Open CSV files of provided datasets
images = []
measurements = []
for csvpath in datasets:
with open(csvpath) as csvfile:
# with open('data/Udacity/driving_log.csv') as csvfile:
print('Processing file "',csvpath,'"... ',sep='',end='')
reader = csv.reader(csvfile, skipinitialspace=True)
j = 0
for line in reader:
if j == 0:
lines_header = line
j = j + 1
continue
else:
# Update image file paths for center, left and right cam
line[0] = ntpath.split(csvpath)[0] + \
'/IMG/'+ntpath.split(line[0])[-1] # center
line[1] = ntpath.split(csvpath)[0] + \
'/IMG/'+ntpath.split(line[1])[-1] # left
line[2] = ntpath.split(csvpath)[0] + \
'/IMG/'+ntpath.split(line[2])[-1] # right
# Add image path information
images.extend([line[0],line[1],line[2]])
# Add steering angle information
measurements.extend([
float(line[3]),
float(line[3])+steer_corr,
float(line[3])-steer_corr])
print('DONE!')
print(' Total amount of datasets is now at',len(images),
'and',len(measurements),'steering infos')
return images, measurements
################################################################################
### Read in files
images, steer = read_csvs(datasets, steer_corr)
### Split datasets between "train" and "test" dataset
images_train, images_test, steer_train, steer_test = train_test_split(
images,
steer,
test_size=test_sz,
random_state=42)
################################################################################
### Keras Neural Network
# Print out
print("\nStarting Keras")
print(" 'X_train' and 'y_train' with {} elements\n".format(len(images_train)))
print(" 'X_test' and 'y_test' with {} elements\n".format(len(images_test)))
################################################################################
### Define generator function
# Generator is used to read in image files during batch creation. Reason for
# introducing this feature was memory limitations: model is now able to process
# as many image files as needed!
def generator(datasets, steer, batch_size):
num_samples = len(datasets)
while 1: # Loop forever so the generator never terminates
datasets, steer = sklearn.utils.shuffle(datasets, steer)
for offset in range(0, num_samples, int(batch_size/2)):
# Half batch size is used, because images are augmented with
# vertically rotated pictures --> multiplier of 2, but half as
# many pictures should be read in one batch
batch_datasets = datasets[offset:offset+int(batch_size/2)]
batch_steers = steer[offset:offset+int(batch_size/2)]
images = []
for batch_dataset in batch_datasets:
image = cv2.cvtColor(cv2.imread(batch_dataset),cv2.COLOR_BGR2RGB)
image_aug = cv2.flip(image,1)
images.append(image)
images.append(image_aug)
angles = []
for batch_steer in batch_steers:
angles.extend([float(batch_steer),float(batch_steer)*-1])
X_batch = np.array(images,dtype='float64')
y_batch = np.array(angles,dtype='float64')
"""
Tools for DESI spectroperfectionism extractions implemented for a CPU
"""
import sys
import numpy as np
from numpy.polynomial.legendre import legvander, legval
from numpy.polynomial import hermite_e as He
import scipy.special
import numba
#-------------------------------------------------------------------------
def evalcoeffs(psfdata, wavelengths, specmin=0, nspec=None):
'''
evaluate PSF coefficients parameterized as Legendre polynomials
Args:
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
wavelengths: 1D array of wavelengths
Options:
specmin: first spectrum to include
nspec: number of spectra to include (default: all)
Returns a dictionary params[paramname] = value[nspec, nwave]
The Gauss Hermite coefficients are treated differently:
params['GH'] = value[i,j,nspec,nwave]
The dictionary also contains scalars with the recommended spot size
2*(HSIZEX, HSIZEY)+1 and Gauss-Hermite degrees GHDEGX, GHDEGY
(which is also derivable from the dimensions of params['GH'])
'''
if nspec is None:
nspec = psfdata['PSF']['COEFF'].shape[1]
p = dict(WAVE=wavelengths)
#- Evaluate X and Y which have different dimensionality from the
#- PSF coefficients (and might have different WAVEMIN, WAVEMAX)
meta = psfdata['XTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
p['X'] = legval(ww, psfdata['XTRACE']['X'][specmin:specmin+nspec].T)
meta = psfdata['YTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
p['Y'] = legval(ww, psfdata['YTRACE']['Y'][specmin:specmin+nspec].T)
#- Evaluate the remaining PSF coefficients with a shared dimensionality
#- and WAVEMIN, WAVEMAX
meta = psfdata['PSF'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
L = np.polynomial.legendre.legvander(ww, meta['LEGDEG'])
nparam = psfdata['PSF']['COEFF'].shape[0]
ndeg = psfdata['PSF']['COEFF'].shape[2]
nwave = L.shape[0]
nghx = meta['GHDEGX']+1
nghy = meta['GHDEGY']+1
p['GH'] = np.zeros((nghx, nghy, nspec, nwave))
for name, coeff in zip(psfdata['PSF']['PARAM'], psfdata['PSF']['COEFF']):
name = name.strip()
coeff = coeff[specmin:specmin+nspec]
if name.startswith('GH-'):
i, j = map(int, name.split('-')[1:3])
p['GH'][i,j] = L.dot(coeff.T).T
else:
p[name] = L.dot(coeff.T).T
#- Include some additional keywords that we'll need
for key in ['HSIZEX', 'HSIZEY', 'GHDEGX', 'GHDEGY']:
p[key] = meta[key]
return p
def calc_pgh(ispec, wavelengths, psfparams):
'''
Calculate pixelated Gauss Hermite for all wavelengths of a single spectrum
Args:
ispec : integer spectrum number
wavelengths : array of wavelengths to evaluate
psfparams : dictionary of PSF parameters returned by evalcoeffs
returns pGHx, pGHy
where pGHx[ghdeg+1, nwave, nbinsx] contains the pixel-integrated
Gauss-Hermite polynomial for all degrees at all wavelengths across
nbinsx bins spanning the PSF spot, and similarly for pGHy. The core
PSF will then be evaluated as
PSFcore = sum_ij c_ij outer(pGHy[j], pGHx[i])
'''
#- shorthand
p = psfparams
#- spot size (ny,nx)
nx = 2*p['HSIZEX']+1
ny = 2*p['HSIZEY']+1
nwave = len(wavelengths)
# print('Spot size (ny,nx) = {},{}'.format(ny, nx))
# print('nwave = {}'.format(nwave))
#- x and y edges of bins that span the center of the PSF spot
xedges = np.repeat(np.arange(nx+1) - nx//2 - 0.5, nwave).reshape(nx+1, nwave)
yedges = np.repeat(np.arange(ny+1) - ny//2 - 0.5, nwave).reshape(ny+1, nwave)
#- Shift to be relative to the PSF center and normalize
#- by the PSF sigma (GHSIGX, GHSIGY).
#- Note: x,y = 0,0 is center of pixel 0,0 not corner
#- Dimensions: xedges[nx+1, nwave], yedges[ny+1, nwave]
dx = (p['X'][ispec]+0.5)%1 - 0.5
dy = (p['Y'][ispec]+0.5)%1 - 0.5
xedges = ((xedges - dx)/p['GHSIGX'][ispec])
yedges = ((yedges - dy)/p['GHSIGY'][ispec])
# print('xedges.shape = {}'.format(xedges.shape))
# print('yedges.shape = {}'.format(yedges.shape))
#- Degree of the Gauss-Hermite polynomials
ghdegx = p['GHDEGX']
ghdegy = p['GHDEGY']
#- Evaluate the Hermite polynomials at the pixel edges
#- HVx[ghdegx+1, nwave, nx+1]
#- HVy[ghdegy+1, nwave, ny+1]
HVx = He.hermevander(xedges, ghdegx).T
HVy = He.hermevander(yedges, ghdegy).T
# print('HVx.shape = {}'.format(HVx.shape))
# print('HVy.shape = {}'.format(HVy.shape))
#- Evaluate the Gaussians at the pixel edges
#- Gx[nwave, nx+1]
#- Gy[nwave, ny+1]
Gx = np.exp(-0.5*xedges**2).T / np.sqrt(2. * np.pi) # (nwave, nedges)
Gy = np.exp(-0.5*yedges**2).T / np.sqrt(2. * np.pi)
# print('Gx.shape = {}'.format(Gx.shape))
# print('Gy.shape = {}'.format(Gy.shape))
#- Combine into Gauss*Hermite
GHx = HVx * Gx
GHy = HVy * Gy
#- Integrate over the pixels using the relationship
# Integral{ H_k(x) exp(-0.5 x^2) dx} = -H_{k-1}(x) exp(-0.5 x^2) + const
#- pGHx[ghdegx+1, nwave, nx]
#- pGHy[ghdegy+1, nwave, ny]
pGHx = np.zeros((ghdegx+1, nwave, nx))
pGHy = np.zeros((ghdegy+1, nwave, ny))
pGHx[0] = 0.5 * np.diff(scipy.special.erf(xedges/np.sqrt(2.)).T)
pGHy[0] = 0.5 * np.diff(scipy.special.erf(yedges/np.sqrt(2.)).T)
pGHx[1:] = GHx[:ghdegx,:,0:nx] - GHx[:ghdegx,:,1:nx+1]
pGHy[1:] = GHy[:ghdegy,:,0:ny] - GHy[:ghdegy,:,1:ny+1]
# print('pGHx.shape = {}'.format(pGHx.shape))
# print('pGHy.shape = {}'.format(pGHy.shape))
return pGHx, pGHy
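#- Hedged numerical sketch (standalone, not executed here) of the pixel-integration
#- identity used above for the probabilists' Hermite polynomials,
#-   integral_a^b He_k(x) N(x) dx = He_{k-1}(a) N(a) - He_{k-1}(b) N(b),  k >= 1,
#- where N(x) = exp(-x^2/2)/sqrt(2*pi); this is exactly how pGHx[1:]/pGHy[1:] are
#- assembled from the edge-evaluated GHx/GHy arrays:
# from scipy.integrate import quad
# k, a, b = 3, -0.7, 0.4
# N = lambda x: np.exp(-0.5 * x * x) / np.sqrt(2.0 * np.pi)
# lhs = quad(lambda x: He.hermeval(x, [0.0] * k + [1.0]) * N(x), a, b)[0]
# rhs = (He.hermeval(a, [0.0] * (k - 1) + [1.0]) * N(a)
#        - He.hermeval(b, [0.0] * (k - 1) + [1.0]) * N(b))
# assert np.isclose(lhs, rhs)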
@numba.jit(nopython=True)
def multispot(pGHx, pGHy, ghc):
'''
Combine pixel-integrated Gauss-Hermite terms into PSF spots for one spectrum.
Args:
pGHx: array[ghdegx+1, nwave, nx] of pixel-integrated Gauss-Hermite x terms
pGHy: array[ghdegy+1, nwave, ny] of pixel-integrated Gauss-Hermite y terms
ghc: array[ghdegx+1, ghdegy+1, nwave] of Gauss-Hermite coefficients
Returns:
spots: array[nwave, ny, nx] PSF spots, where
spots[iwave] = sum_ij ghc[i,j,iwave] * outer(pGHy[j,iwave], pGHx[i,iwave])
'''
nx = pGHx.shape[-1]
ny = pGHy.shape[-1]
nwave = pGHx.shape[1]
spots = np.zeros((nwave, ny, nx))
for iwave in range(nwave):
for i in range(pGHx.shape[0]):
px = pGHx[i,iwave]
for j in range(0, pGHy.shape[0]):
py = pGHy[j,iwave]
c = ghc[i,j,iwave]
#- c * outer(py, px)
for iy in range(len(py)):
for ix in range(len(px)):
spots[iwave, iy, ix] += c * py[iy] * px[ix]
return spots
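#- Hedged reference note (not used; kept as a comment): the loops in multispot are
#- equivalent to a single einsum over the Gauss-Hermite indices,
#-   spots = np.einsum('ijw,jwy,iwx->wyx', ghc, pGHy, pGHx)
#- The explicit loops are presumably kept so the kernel stays numba
#- nopython-compatible with predictable memory use.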
def get_spots(specmin, nspec, wavelengths, psfdata):
'''Calculate PSF spots for the specified spectra and wavelengths
Args:
specmin: first spectrum to include
nspec: number of spectra to evaluate spots for
wavelengths: 1D array of wavelengths
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
Returns:
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
'''
nwave = len(wavelengths)
p = evalcoeffs(psfdata, wavelengths, specmin, nspec)
nx = 2*p['HSIZEX']+1
ny = 2*p['HSIZEY']+1
spots = np.zeros((nspec, nwave, ny, nx))
for ispec in range(nspec):
pGHx, pGHy = calc_pgh(ispec, wavelengths, p)
spots[ispec] = multispot(pGHx, pGHy, p['GH'][:,:,ispec,:])
#- ensure positivity and normalize
#- TODO: should this be within multispot itself?
spots = spots.clip(0.0)
norm = np.sum(spots, axis=(2,3)) #- norm[nspec, nwave] = sum over each spot
spots = (spots.T / norm.T).T #- transpose magic for numpy array broadcasting
#- Define corners of spots
#- extra 0.5 is because X and Y are relative to center of pixel not edge
xc = np.floor(p['X'] - p['HSIZEX'] + 0.5).astype(int)
yc = np.floor(p['Y'] - p['HSIZEY'] + 0.5).astype(int)
corners = (xc, yc)
return spots, corners, p
@numba.jit
def get_xyrange(ispec, nspec, iwave, nwave, spots, corners):
"""
Find xy ranges that these spectra cover
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (xmin, xmax, ymin, ymax)
spots[ispec:ispec+nspec,iwave:iwave+nwave] touch pixels[ymin:ymax,xmin:xmax]
"""
ny, nx = spots.shape[2:4]
xc = corners[0][ispec:ispec+nspec, iwave:iwave+nwave]
yc = corners[1][ispec:ispec+nspec, iwave:iwave+nwave]
xmin = np.min(xc)
xmax = np.max(xc) + nx
ymin = np.min(yc)
ymax = np.max(yc) + ny
return xmin, xmax, ymin, ymax
@numba.jit
def projection_matrix(ispec, nspec, iwave, nwave, spots, corners):
'''
Create the projection matrix A for p = Af
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (A[iy, ix, ispec, iwave], (xmin, xmax, ymin, ymax))
Cast to 2D for using with linear algebra:
nypix, nxpix, nspec, nwave = A.shape
A2D = A.reshape((nypix*nxpix, nspec*nwave))
pix1D = A2D.dot(flux1D)
'''
ny, nx = spots.shape[2:4]
xc, yc = corners
xmin, xmax, ymin, ymax = get_xyrange(ispec, nspec, iwave, nwave, spots, corners)
A = np.zeros((ymax-ymin,xmax-xmin,nspec,nwave))
for i in range(nspec):
for j in range(nwave):
ixc = xc[ispec+i, iwave+j] - xmin
iyc = yc[ispec+i, iwave+j] - ymin
A[iyc:iyc+ny, ixc:ixc+nx, i, j] = spots[ispec+i,iwave+j]
return A, (xmin, xmax, ymin, ymax)
def get_spec_padding(ispec, nspec, bundlesize):
"""
Calculate padding needed for boundary spectra
Args:
ispec: starting spectrum index
nspec: number of spectra to extract (not including padding)
bundlesize: size of fiber bundles; padding not needed on their edges
returns specmin, nspecpad
"""
#- if not at upper boundary, extract one additional spectrum
if (ispec+nspec) % bundlesize == 0:
nspecpad = nspec
else:
nspecpad = nspec + 1
#- if not at lower boundary, start one lower and extract one more
if ispec % bundlesize == 0:
specmin = ispec
else:
specmin = ispec-1
nspecpad += 1
assert nspecpad <= nspec+2
assert specmin >= ispec-1
assert specmin+nspecpad <= ispec+nspec+1
return specmin, nspecpad
def get_resolution_diags(R, ndiag, ispec, nspec, nwave, wavepad):
"""Returns the diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Args:
R: dense resolution matrix
ndiag: number of diagonal elements to keep in the resolution matrix
ispec: starting spectrum index relative to padding
nspec: number of spectra to extract (not including padding)
nwave: number of wavelengths to extract (not including padding)
wavepad: number of extra wave bins to extract (and discard) on each end
Returns:
Rdiags (nspec, 2*ndiag+1, nwave): resolution matrix diagonals
"""
nwavetot = 2*wavepad + nwave
Rdiags = np.zeros( (nspec, 2*ndiag+1, nwave) )
#- TODO: check indexing
for i in np.arange(ispec, ispec+nspec):
#- subregion of R for this spectrum
ii = slice(nwavetot*i, nwavetot*(i+1))
Rx = R[ii, ii]
#- subregion of non-padded wavelengths for this spectrum
for j in range(wavepad,wavepad+nwave):
# Rdiags dimensions [nspec, 2*ndiag+1, nwave]
Rdiags[i-ispec, :, j-wavepad] = Rx[j-ndiag:j+ndiag+1, j]
return Rdiags
def ex2d_padded(image, imageivar, patch, spots, corners, pixpad_frac, regularize, model, psferr):
"""
Extracted a patch with border padding, but only return results for patch
Args:
image: full image (not trimmed to a particular xy range)
imageivar: image inverse variance (same dimensions as image)
ispec: starting spectrum index relative to `spots` indexing
nspec: number of spectra to extract (not including padding)
iwave: starting wavelength index
nwave: number of wavelengths to extract (not including padding)
spots: array[nspec, nwave, ny, nx] pre-evaluated PSF spots
corners: tuple of arrays xcorners[nspec, nwave], ycorners[nspec, nwave]
wavepad: number of extra wave bins to extract (and discard) on each end
Options:
bundlesize: size of fiber bundles; padding not needed on their edges
"""
ispec = patch.ispec - patch.bspecmin
nspec = patch.nspectra_per_patch
iwave = patch.iwave
nwave = patch.nwavestep
wavepad = patch.wavepad
specmin, nspecpad = get_spec_padding(ispec, nspec, patch.bundlesize)
#- Total number of wavelengths to be extracted, including padding
nwavetot = nwave+2*wavepad
#- Get the projection matrix for the full wavelength range with padding
A4, xyrange = projection_matrix(specmin, nspecpad,
iwave-wavepad, nwave+2*wavepad, spots, corners)
xmin, xmax, ypadmin, ypadmax = xyrange
#- But we only want to use the pixels covered by the original wavelengths
#- TODO: this unnecessarily also re-calculates xranges
xlo, xhi, ymin, ymax = get_xyrange(specmin, nspecpad, iwave, nwave, spots, corners)
ypadlo = int((ymin - ypadmin) * (1 - pixpad_frac))
ypadhi = int((ymax - ypadmin) + (ypadmax - ymax) * (pixpad_frac))
A4 = A4[ypadlo:ypadhi]
#- Number of image pixels in y and x
ny, nx = A4.shape[0:2]
ymin = ypadmin+ypadlo
ymax = ypadmin+ypadhi
#- Check dimensions
assert A4.shape[2] == nspecpad
assert A4.shape[3] == nwave + 2*wavepad
#- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
ndiag = spots.shape[2]//2
specslice = np.s_[ispec-specmin:ispec-specmin+nspec,wavepad:wavepad+nwave]
if (0 <= ymin) & (ymin+ny <= image.shape[0]):
xyslice = np.s_[ymin:ymin+ny, xmin:xmin+nx]
patchpixels = image[xyslice]
patchivar = imageivar[xyslice]
fx, ivarfx, R = ex2d_patch(patchpixels, patchivar, A4, regularize=regularize)
#- Select the non-padded spectra x wavelength core region
specflux = fx[specslice]
specivar = ivarfx[specslice]
#- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Rdiags = get_resolution_diags(R, ndiag, ispec-specmin, nspec, nwave, wavepad)
else:
#- TODO: this zeros out the entire patch if any of it is off the edge
#- of the image; we can do better than that
fx = np.zeros((nspecpad, nwavetot))
specflux = np.zeros((nspec, nwave))
specivar = np.zeros((nspec, nwave))
Rdiags = np.zeros( (nspec, 2*ndiag+1, nwave) )
# xyslice = np.s_[
# max(0, ymin):min(ymin+ny, image.shape[0]),
# max(0, xmin):min(xmin+nx, image.shape[1])
# ]
xyslice = None
patchivar = np.zeros((ny, nx))
patchpixels = np.zeros((ny, nx))
if np.any(np.isnan(specflux)):
raise RuntimeError('Found NaN in extracted flux')
Apadded = A4.reshape(ny*nx, nspecpad*nwavetot)
Apatch = A4[:, :, ispec-specmin:ispec-specmin+nspec, wavepad:wavepad+nwave]
Apatch = Apatch.reshape(ny*nx, nspec*nwave)
pixmask_fraction = Apatch.T.dot(patchivar.ravel() == 0)
pixmask_fraction = pixmask_fraction.reshape(nspec, nwave)
modelpadded = Apadded.dot(fx.ravel()).reshape(ny, nx)
modelivar = (modelpadded*psferr + 1e-32)**-2
ii = (modelivar > 0 ) & (patchivar > 0)
totpix_ivar = np.zeros((ny, nx))
totpix_ivar[ii] = 1.0 / (1.0/modelivar[ii] + 1.0/patchivar[ii])
#- Weighted chi2 of pixels that contribute to each flux bin;
#- only use unmasked pixels and avoid dividing by 0
chi = (patchpixels - modelpadded)*np.sqrt(totpix_ivar)
psfweight = Apadded.T.dot(totpix_ivar.ravel() > 0)
bad = psfweight == 0
#- Compute chi2pix and reshape
chi2pix = (Apadded.T.dot(chi.ravel()**2) * ~bad) / (psfweight + bad)
chi2pix = chi2pix.reshape(nspecpad, nwavetot)[specslice]
if model:
modelimage = Apatch.dot(specflux.ravel()).reshape(ny, nx)
else:
modelimage = None
#- TODO: add chi2pix, pixmask_fraction, optionally modelimage; see specter
result = dict(
flux = specflux,
ivar = specivar,
Rdiags = Rdiags,
modelimage = modelimage,
xyslice = xyslice,
pixmask_fraction = pixmask_fraction,
chi2pix = chi2pix,
)
return result
#- Simplest form of A.T.dot( Diag(w).dot(A) )
def dotdot1(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) = (A.T * w).dot(A)
'''
return (A.T * w).dot(A)
#- 2x faster than dotdot1 by using sparse arrays
def dotdot2(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) when A is sparse
'''
import scipy.sparse
W = scipy.sparse.spdiags(data=w, diags=[0,], m=len(w), n=len(w))
Ax = scipy.sparse.csc_matrix(A)
return Ax.T.dot(W.dot(Ax)).toarray()
#- 3x faster than dotdot1 by using numba and sparsity
@numba.jit(nopython=True)
def dotdot3(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) when A is sparse using numba
'''
n, m = A.shape
B = np.zeros((m,m))
for i in range(n):
for j1 in range(m):
Aw = w[i] * A[i,j1]
if Aw != 0.0:
for j2 in range(j1, m):
tmp = Aw * A[i,j2]
B[j1, j2] += tmp
#- fill in other half
for j1 in range(m-1):
for j2 in range(j1+1, m):
B[j2, j1] = B[j1, j2]
return B
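#- Hedged consistency sketch (standalone; uses made-up shapes): all three variants
#- compute the same A.T @ diag(w) @ A and should agree to rounding error.
# Atest = np.random.rand(500, 40)
# Atest[Atest < 0.8] = 0.0            # make it sparse-ish, like a PSF projection matrix
# wtest = np.random.rand(500)
# ref = dotdot1(Atest, wtest)
# assert np.allclose(ref, dotdot2(Atest, wtest))
# assert np.allclose(ref, dotdot3(Atest, wtest))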
@numba.jit(nopython=True)
def dotall(p, w, A):
'''Compute icov, y and fluxweight in the same loop(s)
icov = A^T W A
y = A^T W p
fluxweight = (A^T W).sum(axis=1)
Arguments:
pixel_values: pixel values
pixel_ivar: pixel weights
A: projection matrix
Returns:
icov, y, fluxweight
'''
n, m = A.shape
icov = np.zeros((m,m))
y = np.zeros(m)
from typing import Sequence, Tuple
import numpy as np
def create_rotation_matrix(
image_orientation: Sequence[float],
) -> np.ndarray:
"""Builds a rotation matrix.
Parameters
----------
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to right,
increasing column index) and the column direction (second triplet:
vertical, top to bottom, increasing row index) direction expressed in
the three-dimensional patient or slide coordinate system defined by the
frame of reference
Returns
-------
numpy.ndarray
3 x 3 rotation matrix
"""
if len(image_orientation) != 6:
raise ValueError('Argument "image_orientation" must have length 6.')
row_cosines = np.array(image_orientation[:3], dtype=float)
column_cosines = np.array(image_orientation[3:], dtype=float)
n = np.cross(row_cosines.T, column_cosines.T)
return np.column_stack([
row_cosines,
column_cosines,
n
])
def _create_affine_transformation_matrix(
image_position: Sequence[float],
image_orientation: Sequence[float],
pixel_spacing: Sequence[float],
) -> np.ndarray:
"""Create affine matrix for transformation.
The resulting transformation matrix maps the center of a pixel identified
by zero-based integer indices into the frame of reference, i.e., an input
value of (0, 0) represents the center of the top left hand corner pixel.
See :dcm:`Equation C.7.6.2.1-1 <part03/sect_C.7.6.2.html#sect_C.7.6.2.1.1>`.
Parameters
----------
image_position: Sequence[float]
Position of the slice (image or frame) in the frame of reference, i.e.,
the offset of the top left hand corner pixel in the pixel matrix from
the origin of the reference coordinate system along the X, Y, and Z
axis
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to
right, increasing column index) and the column direction (second
triplet: vertical, top to bottom, increasing row index) direction
expressed in the three-dimensional patient or slide coordinate
system defined by the frame of reference
pixel_spacing: Sequence[float]
Spacing between pixels in millimeter unit along the column
direction (first value: spacing between rows, vertical, top to
bottom, increasing row index) and the rows direction (second value:
spacing between columns: horizontal, left to right, increasing
column index)
Returns
-------
numpy.ndarray
4 x 4 affine transformation matrix
"""
if not isinstance(image_position, Sequence):
raise TypeError('Argument "image_position" must be a sequence.')
if len(image_position) != 3:
raise ValueError('Argument "image_position" must have length 3.')
if not isinstance(image_orientation, Sequence):
raise TypeError('Argument "image_orientation" must be a sequence.')
if len(image_orientation) != 6:
raise ValueError('Argument "image_orientation" must have length 6.')
if not isinstance(pixel_spacing, Sequence):
raise TypeError('Argument "pixel_spacing" must be a sequence.')
if len(pixel_spacing) != 2:
raise ValueError('Argument "pixel_spacing" must have length 2.')
x_offset = float(image_position[0])
y_offset = float(image_position[1])
z_offset = float(image_position[2])
translation = np.array([x_offset, y_offset, z_offset], dtype=float)
rotation = create_rotation_matrix(image_orientation)
# Column direction (spacing between rows)
column_spacing = float(pixel_spacing[0])
# Row direction (spacing between columns)
row_spacing = float(pixel_spacing[1])
rotation[:, 0] *= row_spacing
rotation[:, 1] *= column_spacing
# 4x4 transformation matrix
return np.row_stack(
[
np.column_stack([
rotation,
translation,
]),
[0.0, 0.0, 0.0, 1.0]
]
)
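# Hedged doctest-style illustration (values borrowed from the class examples below):
# the affine maps pixel index (0, 0) to the top-left pixel center given by
# image_position, with the rotation columns scaled by pixel_spacing.
# >>> affine = _create_affine_transformation_matrix(
# ...     image_position=[56.0, 34.2, 1.0],
# ...     image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
# ...     pixel_spacing=[0.5, 0.5])
# >>> affine.dot([0.0, 0.0, 0.0, 1.0])[:3]
# array([56. , 34.2,  1. ])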
def _create_inv_affine_transformation_matrix(
image_position: Sequence[float],
image_orientation: Sequence[float],
pixel_spacing: Sequence[float],
spacing_between_slices: float = 1.0
) -> np.ndarray:
"""Create affine matrix for inverse transformation.
The resulting transformation matrix maps a frame of reference coordinate to
pixel indices, where integer pixel index values represent the center of the
pixel in the image, i.e., an output value of exactly (0.0, 0.0) represents
the center of the top left hand corner pixel.
Parameters
----------
image_position: Sequence[float]
Position of the slice (image or frame) in the frame of reference, i.e.,
the offset of the top left hand corner pixel in the pixel matrix from
the origin of the reference coordinate system along the X, Y, and Z
axis
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to
right, increasing column index) and the column direction (second
triplet: vertical, top to bottom, increasing row index) direction
expressed in the three-dimensional patient or slide coordinate
system defined by the frame of reference
pixel_spacing: Sequence[float]
Spacing between pixels in millimeter unit along the column
direction (first value: spacing between rows, vertical, top to
bottom, increasing row index) and the rows direction (second value:
spacing between columns: horizontal, left to right, increasing
column index)
spacing_between_slices: float, optional
Distance (in the coordinate defined by the frame of reference)
between neighboring slices. Default: 1
Raises
------
TypeError
When `image_position`, `image_orientation`, or `pixel_spacing` is
not a sequence.
ValueError
When `image_position`, `image_orientation`, or `pixel_spacing` has
an incorrect length.
"""
if not isinstance(image_position, Sequence):
raise TypeError('Argument "image_position" must be a sequence.')
if len(image_position) != 3:
raise ValueError('Argument "image_position" must have length 3.')
if not isinstance(image_orientation, Sequence):
raise TypeError('Argument "image_orientation" must be a sequence.')
if len(image_orientation) != 6:
raise ValueError('Argument "image_orientation" must have length 6.')
if not isinstance(pixel_spacing, Sequence):
raise TypeError('Argument "pixel_spacing" must be a sequence.')
if len(pixel_spacing) != 2:
raise ValueError('Argument "pixel_spacing" must have length 2.')
x_offset = float(image_position[0])
y_offset = float(image_position[1])
z_offset = float(image_position[2])
translation = np.array([x_offset, y_offset, z_offset])
rotation = create_rotation_matrix(image_orientation)
# Column direction (spacing between rows)
column_spacing = float(pixel_spacing[0])
# Row direction (spacing between columns)
row_spacing = float(pixel_spacing[1])
rotation[:, 0] *= row_spacing
rotation[:, 1] *= column_spacing
rotation[:, 2] *= spacing_between_slices
inv_rotation = np.linalg.inv(rotation)
# 4x4 transformation matrix
return np.row_stack(
[
np.column_stack([
inv_rotation,
-np.dot(inv_rotation, translation)
]),
[0.0, 0.0, 0.0, 1.0]
]
)
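# Hedged follow-up to the sketch above: with the same (hypothetical) geometry the
# inverse affine sends the reference point back to pixel indices (0, 0, 0).
# >>> inv_affine = _create_inv_affine_transformation_matrix(
# ...     image_position=[56.0, 34.2, 1.0],
# ...     image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
# ...     pixel_spacing=[0.5, 0.5])
# >>> np.abs(inv_affine.dot([56.0, 34.2, 1.0, 1.0])[:3]).round(6)
# array([0., 0., 0.])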
class PixelToReferenceTransformer(object):
"""Class for transforming pixel indices to reference coordinates.
This class facilitates the mapping of pixel indices to the pixel matrix of
an image or an image frame (tile or plane) into the patient or slide
coordinate system defined by the frame of reference.
Pixel indices are (column, row) pairs of zero-based integer values, where
the (0, 0) index is located at the **center** of the top left hand corner
pixel of the pixel matrix.
Reference coordinates are (x, y, z) triplets of floating-point values,
where the (0.0, 0.0) point is located at the origin of the frame of
reference.
Examples
--------
>>> import numpy as np
>>>
>>> # Create a transformer by specifying the reference space of
>>> # an image
>>> transformer = PixelToReferenceTransformer(
... image_position=[56.0, 34.2, 1.0],
... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
... pixel_spacing=[0.5, 0.5])
>>>
>>> # Use the transformer to convert coordinates
>>> pixel_indices = np.array([[0, 10], [5, 5]])
>>> ref_coords = transformer(pixel_indices)
>>> print(ref_coords)
[[56. 39.2 1. ]
[58.5 36.7 1. ]]
Warning
-------
This class shall not be used to map spatial coordinates (SCOORD)
to 3D spatial coordinates (SCOORD3D). Use the
:class:`highdicom.spatial.ImageToReferenceTransformer` class instead.
"""
def __init__(
self,
image_position: Sequence[float],
image_orientation: Sequence[float],
pixel_spacing: Sequence[float],
):
"""Construct transformation object.
Parameters
----------
image_position: Sequence[float]
Position of the slice (image or frame) in the frame of reference,
i.e., the offset of the top left hand corner pixel in the pixel
matrix from the origin of the reference coordinate system along the
X, Y, and Z axis
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to
right, increasing column index) and the column direction (second
triplet: vertical, top to bottom, increasing row index) direction
expressed in the three-dimensional patient or slide coordinate
system defined by the frame of reference
pixel_spacing: Sequence[float]
Spacing between pixels in millimeter unit along the column
direction (first value: spacing between rows, vertical, top to
bottom, increasing row index) and the rows direction (second value:
spacing between columns: horizontal, left to right, increasing
column index)
Raises
------
TypeError
When any of the arguments is not a sequence.
ValueError
When any of the arguments has an incorrect length.
"""
self._affine = _create_affine_transformation_matrix(
image_position=image_position,
image_orientation=image_orientation,
pixel_spacing=pixel_spacing
)
@property
def affine(self) -> np.ndarray:
"""numpy.ndarray: 4x4 affine transformation matrix"""
return self._affine
def __call__(self, indices: np.ndarray) -> np.ndarray:
"""Transform image pixel indices to frame of reference coordinates.
Parameters
----------
indices: numpy.ndarray
Array of (column, row) zero-based pixel indices in the range
[0, Columns - 1] and [0, Rows - 1], respectively.
Array of integer values with shape ``(n, 2)``, where *n* is
the number of indices, the first column represents the `column`
index and the second column represents the `row` index.
The ``(0, 0)`` coordinate is located at the **center** of the top
left pixel in the total pixel matrix.
Returns
-------
numpy.ndarray
Array of (x, y, z) coordinates in the coordinate system defined by
the frame of reference. Array has shape ``(n, 3)``, where *n* is
the number of coordinates, the first column represents the `x`
offsets, the second column represents the `y` offsets and the third
column represents the `z` offsets
Raises
------
ValueError
When `indices` has incorrect shape.
TypeError
When `indices` don't have integer data type.
"""
if indices.shape[1] != 2:
raise ValueError(
'Argument "indices" must be a two-dimensional array '
'with shape [n, 2].'
)
if indices.dtype.kind not in ('u', 'i'):
raise TypeError(
'Argument "indices" must be a two-dimensional array '
'of integers.'
)
pixel_matrix_coordinates = np.row_stack([
indices.T.astype(float),
np.zeros((indices.shape[0], ), dtype=float),
np.ones((indices.shape[0], ), dtype=float),
])
reference_coordinates = np.dot(self._affine, pixel_matrix_coordinates)
return reference_coordinates[:3, :].T
class ReferenceToPixelTransformer(object):
"""Class for transforming reference coordinates to pixel indices.
This class facilitates the mapping of coordinates in the patient or slide
coordinate system defined by the frame of reference into the total pixel
matrix.
Reference coordinates are (x, y, z) triplets of floating-point values,
where the (0.0, 0.0) point is located at the origin of the frame of
reference.
Pixel indices are (column, row) pairs of zero-based integer values, where
the (0, 0) index is located at the **center** of the top left hand corner
pixel of the pixel matrix.
Examples
--------
>>> transformer = ReferenceToPixelTransformer(
... image_position=[56.0, 34.2, 1.0],
... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
... pixel_spacing=[0.5, 0.5]
... )
>>>
>>> ref_coords = np.array([[56., 39.2, 1. ], [58.5, 36.7, 1.]])
>>> pixel_indices = transformer(ref_coords)
>>> print(pixel_indices)
[[ 0 10 0]
[ 5 5 0]]
Warning
-------
This class shall not be used to map 3D spatial coordinates (SCOORD3D)
to spatial coordinates (SCOORD). Use the
:class:`highdicom.spatial.ReferenceToImageTransformer` class instead.
"""
def __init__(
self,
image_position: Sequence[float],
image_orientation: Sequence[float],
pixel_spacing: Sequence[float],
spacing_between_slices: float = 1.0
):
"""Construct transformation object.
Builds an inverse of an affine transformation matrix for mapping
coordinates from the frame of reference into the two
dimensional pixel matrix.
Parameters
----------
image_position: Sequence[float]
Position of the slice (image or frame) in the frame of reference,
i.e., the offset of the top left hand corner pixel in the pixel
matrix from the origin of the reference coordinate system along the
X, Y, and Z axis
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to
right, increasing column index) and the column direction (second
triplet: vertical, top to bottom, increasing row index) direction
expressed in the three-dimensional patient or slide coordinate
system defined by the frame of reference
pixel_spacing: Sequence[float]
Spacing between pixels in millimeter unit along the column
direction (first value: spacing between rows, vertical, top to
bottom, increasing row index) and the rows direction (second value:
spacing between columns: horizontal, left to right, increasing
column index)
spacing_between_slices: float, optional
Distance (in the coordinate defined by the frame of reference)
between neighboring slices. Default: 1
Raises
------
TypeError
When `image_position`, `image_orientation` or `pixel_spacing` is
not a sequence.
ValueError
When `image_position`, `image_orientation` or `pixel_spacing` has
an incorrect length.
"""
self._affine = _create_inv_affine_transformation_matrix(
image_position=image_position,
image_orientation=image_orientation,
pixel_spacing=pixel_spacing,
spacing_between_slices=spacing_between_slices
)
@property
def affine(self) -> np.ndarray:
"""numpy.ndarray: 4 x 4 affine transformation matrix"""
return self._affine
def __call__(self, coordinates: np.ndarray) -> np.ndarray:
"""Transform frame of reference coordinates into image pixel indices.
Parameters
----------
coordinates: numpy.ndarray
Array of (x, y, z) coordinates in the coordinate system defined by
the frame of reference. Array has shape ``(n, 3)``, where *n* is
the number of coordinates, the first column represents the *X*
offsets, the second column represents the *Y* offsets and the third
column represents the *Z* offsets
Returns
-------
numpy.ndarray
Array of (column, row) zero-based indices at pixel resolution.
Array of integer values with shape ``(n, 2)``, where *n* is
the number of indices, the first column represents the `column`
index and the second column represents the `row` index.
The ``(0, 0)`` coordinate is located at the **center** of the top
left pixel in the total pixel matrix.
Note
----
The returned pixel indices may be negative if `coordinates` fall
outside of the total pixel matrix.
Raises
------
ValueError
When `indices` has incorrect shape.
"""
if coordinates.shape[1] != 3:
raise ValueError(
'Argument "coordinates" must be a two-dimensional array '
'with shape [n, 3].'
)
reference_coordinates = np.row_stack([
coordinates.T.astype(float),
np.ones((coordinates.shape[0], ), dtype=float)
])
pixel_matrix_coordinates = np.dot(self._affine, reference_coordinates)
return np.around(pixel_matrix_coordinates[:3, :].T).astype(int)
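# Hedged round-trip note: combining the two transformers above with the same
# (hypothetical) geometry should recover the original pixel indices, e.g.
# >>> to_ref = PixelToReferenceTransformer(
# ...     image_position=[56.0, 34.2, 1.0],
# ...     image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
# ...     pixel_spacing=[0.5, 0.5])
# >>> to_pix = ReferenceToPixelTransformer(
# ...     image_position=[56.0, 34.2, 1.0],
# ...     image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
# ...     pixel_spacing=[0.5, 0.5])
# >>> to_pix(to_ref(np.array([[0, 10], [5, 5]])))[:, :2]
# array([[ 0, 10],
#        [ 5,  5]])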
class ImageToReferenceTransformer(object):
"""Class for transforming coordinates from image to reference space.
This class facilitates the mapping of image coordinates in the pixel matrix
of an image or an image frame (tile or plane) into the patient or slide
coordinate system defined by the frame of reference.
For example, this class may be used to map spatial coordinates (SCOORD)
to 3D spatial coordinates (SCOORD3D).
Image coordinates are (column, row) pairs of floating-point values, where
the (0.0, 0.0) point is located at the top left corner of the top left hand
corner pixel of the pixel matrix. Image coordinates have pixel units at
sub-pixel resolution.
Reference coordinates are (x, y, z) triplets of floating-point values,
where the (0.0, 0.0) point is located at the origin of the frame of
reference. Reference coordinates have millimeter units.
Examples
--------
>>> transformer = ImageToReferenceTransformer(
... image_position=[56.0, 34.2, 1.0],
... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
... pixel_spacing=[0.5, 0.5]
... )
>>>
>>> image_coords = np.array([[0.0, 10.0], [5.0, 5.0]])
>>> ref_coords = transformer(image_coords)
>>> print(ref_coords)
[[55.75 38.95 1. ]
[58.25 36.45 1. ]]
Warning
-------
This class shall not be used for pixel indices. Use the
class:`highdicom.spatial.PixelToReferenceTransformer` class instead.
"""
def __init__(
self,
image_position: Sequence[float],
image_orientation: Sequence[float],
pixel_spacing: Sequence[float]
):
"""Construct transformation object.
Parameters
----------
image_position: Sequence[float]
Position of the slice (image or frame) in the frame of reference,
i.e., the offset of the top left hand corner pixel in the pixel
matrix from the origin of the reference coordinate system along the
X, Y, and Z axis
image_orientation: Sequence[float]
Cosines of the row direction (first triplet: horizontal, left to
right, increasing column index) and the column direction (second
triplet: vertical, top to bottom, increasing row index) direction
expressed in the three-dimensional patient or slide coordinate
system defined by the frame of reference
pixel_spacing: Sequence[float]
Spacing between pixels in millimeter unit along the column
direction (first value: spacing between rows, vertical, top to
bottom, increasing row index) and the rows direction (second value:
spacing between columns: horizontal, left to right, increasing
column index)
Raises
------
TypeError
When any of the arguments is not a sequence.
ValueError
When any of the arguments has an incorrect length.
"""
affine = _create_affine_transformation_matrix(
image_position=image_position,
image_orientation=image_orientation,
pixel_spacing=pixel_spacing
)
correction_affine = np.array([
[1.0, 0.0, 0.0, -0.5],
[0.0, 1.0, 0.0, -0.5],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
])
self._affine = np.dot(affine, correction_affine)
@property
def affine(self) -> np.ndarray:
"""numpy.ndarray: 4x4 affine transformation matrix"""
return self._affine
def __call__(self, coordinates: np.ndarray) -> np.ndarray:
"""Transform image coordinates to frame of reference coordinates.
Parameters
----------
coordinates: numpy.ndarray
Array of (column, row) coordinates at sub-pixel resolution in the
range [0, Columns] and [0, Rows], respectively.
Array of floating-point values with shape ``(n, 2)``, where *n* is
the number of coordinates, the first column represents the `column`
values and the second column represents the `row` values.
The ``(0.0, 0.0)`` coordinate is located at the top left corner
of the top left hand corner pixel in the total pixel matrix.
Returns
-------
numpy.ndarray
Array of (x, y, z) coordinates in the coordinate system defined by
the frame of reference. Array has shape ``(n, 3)``, where *n* is
the number of coordinates, the first column represents the *X*
offsets, the second column represents the *Y* offsets and the third
column represents the *Z* offsets
Raises
------
ValueError
When `coordinates` has incorrect shape.
"""
if coordinates.shape[1] != 2:
raise ValueError(
'Argument "coordinates" must be a two-dimensional array '
'with shape [n, 2].'
)
image_coordinates = np.row_stack([
coordinates.T.astype(float),
np.zeros((coordinates.shape[0], ), dtype=float),
np.ones((coordinates.shape[0], ), dtype=float),
])
reference_coordinates = np.dot(self._affine, image_coordinates)
import matplotlib.pyplot as plt
import torch
import torch.nn
import numpy as np
from mpl_toolkits.basemap import Basemap
from metpy.plots import colortables
import cv2
def downsample(x, pool=25):
x = torch.from_numpy(x[np.newaxis])
xlr = torch.nn.AvgPool2d(pool, stride=pool)(x)
return xlr.detach().numpy()[0]
def flow_quiver_plot(u, v, x=None, y=None, ax=None,
down=25, vmin=None, vmax=None, latlon=False,
size=10, cmap='jet', colorbar=False):
intensity = (u**2 + v**2) ** 0.5
u_l = downsample(u, down)
v_l = downsample(v, down)
intensity_l = ( u_l ** 2 + v_l**2 ) ** 0.5
#intensity_l = ( u_l ** 2 + v_l**2 ) ** 0.25
u_l = u_l #/ intensity_l
v_l = v_l #/ intensity_l
if (x is None) or (y is None):
x = np.arange(0, u_l.shape[1]) * down + down/2.
y = np.arange(0, v_l.shape[0]) * down + down/2.
X, Y = np.meshgrid(x, y)
else:
x = downsample(x, down)
y = downsample(y, down)
X, Y = x, y
#if X.shape[0] != u_l.shape[0]:
#Y = cv2.resize(Y, dsize=v_l.shape, interpolation=cv2.INTER_LINEAR)
#X = cv2.resize(X, dsize=u_l.shape, interpolation=cv2.INTER_LINEAR)
if not ax:
ratio = 1.*u.shape[0] / u.shape[1]
hi = int(ratio * size)
wi = int(size)
fig = plt.figure(figsize=(wi,hi), frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
if vmax is None:
vmax = np.nanmax(intensity)
#Differential Photometry script written in April 2019 by SKB, MP, KWD for WIYN 0.9m HDI data
#This script calculates photometry and differential photometry for all stars in an image and takes target positions to pull out differential photometry of target stars. Auto calculates comparison stars based on lowest percentile of variability of stars in the image.
# Script is run through a shell jupyter notebook script.
#Initially created by <NAME> as a jupyter notebook 2018
#Turned into modular form by <NAME> April 2019
#Modified by <NAME>, <NAME>, <NAME> April 2019
# python 2/3 compatibility
from __future__ import print_function
# numerical python
import numpy as np
# file management tools
import glob
import os
# good module for timing tests
import time
# plotting stuff
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# ability to read/write fits files
from astropy.io import fits
# fancy image combination technique
from astropy.stats import sigma_clip
# median absolute deviation: for photometry
from astropy.stats import mad_std
# photometric utilities
from photutils import DAOStarFinder,aperture_photometry, CircularAperture, CircularAnnulus, Background2D, MedianBackground
# periodograms
from astropy.stats import LombScargle
from regions import read_ds9, write_ds9
from astropy.wcs import WCS
import warnings
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.visualization import ZScaleInterval
import numpy.ma as ma
warnings.filterwarnings("ignore")
np.set_printoptions(suppress=True)
def construct_astrometry(hdr_wcs):
'''
construct_astrometry
make the pixel to RA/Dec conversion (and back) from the header of an astrometry.net return
inputs
------------------------------
hdr_wcs : header with astrometry information, typically from astrometry.net
returns
------------------------------
w : the WCS instance
'''
# initialize the World Coordinate System
w = WCS(naxis=2)
# specify the pixel to RA/Dec conversion
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cd = np.array([[hdr_wcs['CD1_1'],hdr_wcs['CD1_2']],[hdr_wcs['CD2_1'],hdr_wcs['CD2_2']]])
w.wcs.crpix = [hdr_wcs['CRPIX1'], hdr_wcs['CRPIX2']]
w.wcs.crval = [hdr_wcs['CRVAL1'],hdr_wcs['CRVAL2']]
w.wcs.cunit = [hdr_wcs['CUNIT1'],hdr_wcs['CUNIT2']]
w.wcs.latpole = hdr_wcs['LATPOLE']
#w.wcs.lonpole = hdr_wcs['LONPOLE']
w.wcs.theta0 = hdr_wcs['LONPOLE']
w.wcs.equinox = hdr_wcs['EQUINOX']
# calculate the RA/Dec to pixel conversion
w.wcs.fix()
w.wcs.cdfix()
w.wcs.set()
# return the instance
return w
def StarFind(imname, FWHM, nsigma):
'''
StarFind
find all stars in a .fits image
inputs
----------
imname: name of .fits image to open.
FWHM: fwhm of stars in field
nsigma: number of sigma above background above which to select sources. (~3 to 4 is a good estimate)
outputs
--------
xpos: x positions of sources
ypos: y positions of sources
nstars: number of stars found in image
'''
#open image
im,hdr=fits.getdata(imname, header=True)
im = np.array(im).astype('float')
#determine background
bkg_sigma = mad_std(im)
print('begin: DAOStarFinder')
daofind = DAOStarFinder(fwhm=FWHM, threshold=nsigma*bkg_sigma, exclude_border=True)
sources = daofind(im)
#x and y positions
xpos = sources['xcentroid']
ypos = sources['ycentroid']
#number of stars found
nstars = len(xpos)
print('found ' + str(nstars) + ' stars')
return xpos, ypos, nstars
def makeApertures(xpos, ypos, aprad,skybuff, skywidth):
'''
makeApertures
makes a master list of apertures and the annuli
inputs
---------
xpos: list - x positions of stars in image
ypos: list - y positions of stars in image
aprad: float - aperture radius
skybuff: float - sky annulus inner radius
skywidth: float - sky annulus outer radius
outputs
--------
apertures: list - list of aperture positions and radius
annulus_apertures: list - list of annuli positions and radius
see: https://photutils.readthedocs.io/en/stable/api/photutils.CircularAperture.html#photutils.CircularAperture
for more details
'''
# make the master list of apertures
apertures = CircularAperture((xpos, ypos), r=aprad)
annulus_apertures = CircularAnnulus((xpos, ypos), r_in=aprad+skybuff, r_out=aprad+skybuff+skywidth)
apers = [apertures, annulus_apertures]
return apertures, annulus_apertures
def apertureArea(apertures):
''' returns the area of the aperture'''
return apertures.area() ### should be apertures
def backgroundArea(back_aperture):
'''returns the area of the annuli'''
return back_aperture.area() ### should be annulus_apertures
def doPhotometry(imglist, xpos, ypos, aprad, skybuff, skywidth,timekey='MJD-OBS',verbose=1):
'''
doPhotometry*
determine the flux for each star from aperture photometry
inputs
-------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
aprad, skybuff, skywidth: floats - aperture, sky annuli inner, sky annuli outer radii
outputs
-------
Times: list - time stamps of each observation from the .fits header
Photometry: list - aperture photometry flux values found at each xpos, ypos position
'''
#number of images
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#create lists for timestamps and flux values
Times = np.zeros(nimages)
Photometry = np.zeros((nimages,nstars))
print('making apertures')
#make the apertures around each star
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#plot apertures
plt.figure(figsize=(12,12))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(fits.getdata(imglist[0]))
plt.imshow(fits.getdata(imglist[0]), vmin=vmin,vmax=vmax, origin='lower')
apertures.plot(color='white', lw=2)
#annulus_apertures.plot(color='red', lw=2)
plt.title('apertures')
plt.show()
#determine area of apertures
area_of_ap = apertureArea(apertures)
#determine area of annuli
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#go through each image and run aperture photometry
for ind in np.arange(nimages):
if ((ind in checknum) & (verbose==1)):
print('running aperture photometry on image: ', ind )
if (verbose>1):
print('running aperture photometry on image: ', ind )
#open image
data_image, hdr = fits.getdata(imglist[ind], header=True)
#find time stamp and append to list
Times[ind] = hdr[timekey]
#do photometry
phot_table = aperture_photometry(data_image, (apertures,annulus_apertures))
#determine flux: (aperture flux) - [(area of aperture * annuli flux)/area of background ]
flux0 = np.array(phot_table['aperture_sum_0']) - (area_of_ap/area_of_background)*np.array(phot_table['aperture_sum_1'])
#append to list
Photometry[ind,:] = flux0
return Times,Photometry
def doPhotometryError(imglist,xpos, ypos,aprad, skybuff, skywidth, flux0, GAIN=1.3, manual = False, **kwargs):
'''
doPhotometryError
determine error in photometry from background noise
two options:
- use sigma clipping and use whole background
- manually input background box positions as kwargs
inputs
--------
imglist: list - list of .fits images
xpos, ypos: lists - lists of x and y positions of stars
aprad, skybuff, skywidth: floats - aperture, sky annuli inner, sky annuli outer radii
flux0: list - aperture photometry found from doPhotometry() function
GAIN: float - average gain
manual: boolean - switch between manually inputting box (True) or using sigma clipping (False)
if True -- must have kwargs
manual = False is default
**kwargs
kwargs[xboxcorner]: float - x edge of box in pixel coords
kwargs[yboxcorner]: float - y edge of box in pixel coords
kwargs[boxsize]: float - size of box in pixel coords
'''
# find number of images in list
nimages = len(imglist)
nstars = len(xpos)
print('Found {} images'.format(nimages))
#make apertures
apertures, annulus_apertures = makeApertures(xpos, ypos, aprad, skybuff, skywidth)
#find areas of apertures and annuli
area_of_ap = apertureArea(apertures)
area_of_background = backgroundArea(annulus_apertures)
checknum = np.linspace(0,nimages,10).astype(int)
#find error in photometry
ePhotometry = np.zeros((nimages,nstars))
for ind in np.arange(nimages):
#open images
im = fits.getdata(imglist[ind])
if ind in checknum:
print('running error analysis on image ', ind)
#determine variance in background
if manual == True: #manual method -- choose back size
skyvar = np.std(im[kwargs['xboxcorner']:kwargs['xboxcorner']+kwargs['boxsize'],kwargs['yboxcorner']:kwargs['yboxcorner']+kwargs['boxsize']])**2.
err1 = skyvar*(area_of_ap)**2./(kwargs['boxsize']*kwargs['boxsize']) # uncertainty in mean sky brightness
if manual == False: #automatic method -- use sigma clipping
filtered_data = sigma_clip(im, sigma=3)
skyvar = np.std(filtered_data)**2.
err1 = skyvar*(area_of_ap)**2./(np.shape(im)[0]*np.shape(im)[1]) # uncertainty in mean sky brightness
err2 = area_of_ap * skyvar # scatter in sky values
err3 = flux0[ind]/GAIN # Poisson error
print ('Scatter in sky values: ',err2**0.5,', uncertainty in mean sky brightness: ',err1**0.5)
# sum sources of error in quadrature
errtot = (err1 + err2 + err3)**0.5
#append to list
ePhotometry[ind,:] = errtot
return ePhotometry
def mask(Photometry, ePhotometry, sn_thresh=3.):
Photometry_mask1 = ma.masked_where(Photometry <= 0, Photometry)
sn = Photometry_mask1 / ePhotometry
Photometry_mask2 = ma.masked_where(sn < sn_thresh, Photometry_mask1)
ePhotometry_mask1 = ma.masked_where(Photometry <= 0, ePhotometry)
sn = Photometry_mask1 / ePhotometry
ePhotometry_mask2 = ma.masked_where(sn < sn_thresh, ePhotometry_mask1)
return Photometry_mask2, ePhotometry_mask2
# detrend all stars
def detrend(idx, Photometry_initial, ePhotometry, nstars, sn_thresh):
'''
detrend
detrend the background for each night so we don't have to worry about changes in background noise levels
inputs
-------
photometry: list - list of flux values from aperture photometry
ephotometry: list - list of flux errors from aperture photometry
nstars: float - number of stars in the field
outputs
--------
finalPhot: list - final aperture photometry of sources with bad sources replaced with nans.
<< this is the list you want to use from now on. >>
cPhotometry: list - detrended aperture photometry
'''
sn = Photometry_initial / ePhotometry
Photometry_mask1 = ma.masked_where(Photometry_initial <= 0, Photometry_initial)
Photometry_mask2 = ma.masked_where(sn < sn_thresh, Photometry_mask1)
#mask out target stars
m = np.zeros_like(Photometry_mask2)
m[:,idx] = 1
Photometry_initial_mask3 = ma.masked_array(Photometry_mask2, m)
med_val = np.median(Photometry_initial_mask3, axis=0)
c = np.zeros_like(Photometry_initial_mask3)
c[:,med_val<=0] = 1
# get median flux value for each star (find percent change)
cPhotometry = ma.masked_array(Photometry_initial_mask3, c)
cPhotometry = cPhotometry / med_val
# do a check for outlier photometry?
for night in np.arange(len(cPhotometry)):
# remove large-scale image-to-image variation to find best stars
cPhotometry[night] = cPhotometry[night] / ma.median(cPhotometry[night])
# eliminate stars with outliers from consideration
cPhotometry_mask = ma.masked_where( ((cPhotometry < 0.5) | (cPhotometry > 1.5)), cPhotometry)
return Photometry_initial_mask3, cPhotometry_mask
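# Illustrative usage sketch (assumed inputs; `idx` holds the column indices of the target stars so
# they are excluded from the comparison-star statistics):
#   finalPhot, cPhot = detrend(idx=[0], Photometry_initial=Photometry, ePhotometry=ePhotometry,
#                              nstars=nstars, sn_thresh=3.)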
def plotPhotometry(Times,cPhotometry):
'''plot detrended photometry'''
plt.figure()
for ind in np.arange(np.shape(cPhotometry)[1]):
plt.scatter(Times-np.nanmin(Times),cPhotometry[:,ind],s=1.,color='black')
# make the ranges a bit more general
plt.xlim(-0.1,1.1*np.max(Times-np.nanmin(Times)))
plt.ylim(np.nanmin(cPhotometry),np.nanmax(cPhotometry))
plt.xlabel('Observation Time [days]')
plt.ylabel('Detrended Flux')
plt.show()
def CaliforniaCoast(Photometry,cPhotometry,comp_num=9,flux_bins=6):
"""
Find the least-variable stars as a function of star brightness*
(it's called California Coast because the plot looks like California and we are looking for edge values: the coast)
inputs
--------------
Photometry : input Photometry catalog
cPhotometry : input detrended Photometry catalog
flux_bins : (default=6) number of flux-percentile edges (giving flux_bins-1 partitions)
comp_num : (default=9) number of least-variable comparison stars to keep per partition
outputs
--------------
BinStars : dictionary of stars in each of the flux partitions
LeastVariable : dictionary of least variable stars in each of the flux partitions
"""
tmpX = np.nanmedian(Photometry,axis=0)
tmpY = np.nanstd(cPhotometry, axis=0)
xvals = tmpX[(np.isfinite(tmpX) & np.isfinite(tmpY))]
yvals = tmpY[(np.isfinite(tmpX) & np.isfinite(tmpY))]
kept_vals = np.where((np.isfinite(tmpX) & np.isfinite(tmpY)))[0]
#print('Keep',kept_vals)
# make the bins in flux, equal in percentile
flux_percents = np.linspace(0.,100.,flux_bins)
print('Bin Percentiles to check:',flux_percents)
# make the dictionary to return the best stars
LeastVariable = {}
BinStars = {}
for bin_num in range(0,flux_percents.size-1):
# get the flux boundaries for this bin
min_flux = np.percentile(xvals,flux_percents[bin_num])
max_flux = np.percentile(xvals,flux_percents[bin_num+1])
#print('Min/Max',min_flux,max_flux)
# select the stars meeting the criteria
w = np.where( (xvals >= min_flux) & (xvals < max_flux))[0]
BinStars[bin_num] = kept_vals[w]
# now look at the least variable X stars
nstars = w.size
#print('Number of stars in bin {}:'.format(bin_num),nstars)
# organize stars by flux uncertainty
binStarsX = xvals[w]
binStarsY = yvals[w]
# mininum Y stars in the bin:
lowestY = kept_vals[w[binStarsY.argsort()][0:comp_num]]
#print('Best {} stars in bin {}:'.format(comp_num,bin_num),lowestY)
LeastVariable[bin_num] = lowestY
return BinStars,LeastVariable
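# Illustrative usage sketch: both returned dictionaries are keyed by flux-bin number, so the
# least-variable stars in the brightest partition can be pulled out directly (array names assumed):
#   BinStars, LeastVariable = CaliforniaCoast(Photometry, cPhotometry, comp_num=9, flux_bins=6)
#   brightest_bin = max(LeastVariable.keys())
#   print('least-variable bright stars:', LeastVariable[brightest_bin])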
def findComparisonStars(Photometry, cPhotometry, accuracy_threshold = 0.2, plot=True,comp_num=6): #0.025
'''
findComparisonStars*
finds stars that are similar over the various nights to use as comparison stars
inputs
--------
Photometry: list - photometric values taken from detrend() function.
cPhotometry: list - detrended photometric values from detrend() function
accuracy_threshold: float - level of accuracy in fluxes between various nights (not used by the current implementation, which relies on CaliforniaCoast instead)
plot: boolean - True/False plot various stars and highlight comparison stars
outputs
--------
most_accurate: list - list of indices of the locations in Photometry which have the best stars to use as comparisons
'''
BinStars,LeastVariable = CaliforniaCoast(Photometry,cPhotometry,comp_num=comp_num)
star_err = ma.std(cPhotometry, axis=0)
if plot:
xvals = np.log10(ma.median(Photometry,axis=0))
yvals = np.log10(ma.std(cPhotometry, axis=0))
plt.figure()
plt.scatter(xvals,yvals,color='black',s=1.)
plt.xlabel('log Median Flux per star')
plt.ylabel('log De-trended Standard Deviation')
plt.text(np.nanmin(np.log10(ma.median(Photometry,axis=0))),np.nanmin(np.log10(star_err[star_err>0.])),\
'Less Variable',color='red',ha='left',va='bottom')
plt.text(np.nanmax(np.log10(ma.median(Photometry,axis=0))),np.nanmax(np.log10(star_err[star_err>0.])),\
'More Variable',color='red',ha='right',va='top')
for k in LeastVariable.keys():
plt.scatter(xvals[LeastVariable[k]],yvals[LeastVariable[k]],color='red')
# this is the middle key for safety
middle_key = np.array(list(LeastVariable.keys()))[len(LeastVariable.keys())//2]
# but now let's select the brightest one
best_key = np.array(list(LeastVariable.keys()))[-1]
return LeastVariable[best_key]
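# Illustrative usage sketch chaining the steps above (all array names assumed):
#   finalPhot, cPhot = detrend(idx, Photometry, ePhotometry, nstars, sn_thresh=3.)
#   most_accurate = findComparisonStars(finalPhot, cPhot, plot=False, comp_num=6)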
def runDifferentialPhotometry(photometry, ephotometry, nstars, most_accurate, sn_thresh):
'''
runDifferentialPhotometry
as the name says!
inputs
----------
Photometry: list - list of photometric values from detrend() function
ePhotometry: list - list of photometric error values
nstars: float - number of stars
most_accurate: list - list of indices of non variable comparison stars
outputs
---------
dPhotometry: list - differential photometry list
edPhotometry: list - scaling factors to photometry error
tePhotometry: list - differential photometry error
'''
Photometry = ma.masked_where(photometry <= 0, photometry)
ePhotometry = ma.masked_where(photometry <= 0, ephotometry)
#number of nights of photometry
nimages = len(Photometry)
#range of number of nights
imgindex = np.arange(0,nimages,1)
#create lists for diff photometry
dPhotometry = ma.ones([nimages, len(Photometry[0])])
edPhotometry = ma.ones([nimages, len(Photometry[0])])
eedPhotometry = ma.ones([nimages, len(Photometry[0])])
tePhotometry = ma.ones([nimages, len(Photometry[0])])
checknum = np.linspace(0,nstars,10).astype(int)
for star in np.arange(nstars):
if star in checknum:
print('running differential photometry on star: ', star+1, '/', nstars)
starPhotometry = Photometry[:,star]
starPhotometryerr = ePhotometry[:,star]
#create temporary photometry list for each comparison star
tmp_phot = ma.ones([nimages,len(most_accurate)])
#go through comparison stars and determine differential photometry
for ind, i in enumerate(most_accurate):
#pull out each star's photometry + error Photometry for each night and place in list
compStarPhotometry = Photometry[:,i]
#calculate differential photometry
tmp = starPhotometry*ma.median(compStarPhotometry)
### Utility functions for obtaining the perturbation mask
import numpy as np
import itertools
import random
import math
from utils import *
import os
import time
import scipy.io as scio
import datetime
import re
import matplotlib.pyplot as plt
import pylab
import os
import csv
from skimage import transform, filters
from textwrap import wrap
import cv2
import sys
from PIL import Image
def Get_blurred_img(input_img, img_label, model, resize_shape=(224, 224), Gaussian_param = [51, 50], Median_param = 11, blur_type= 'Gaussian', use_cuda = 1):
########################
# Generate blurred images as the baseline
# Parameters:
# -------------
# input_img: the original input image
# img_label: the classification target that you want to visualize (img_label=-1 means the top 1 classification label)
# model: the model that you want to visualize
# resize_shape: the input size for the given model
# Gaussian_param: parameters for Gaussian blur
# Median_param: parameters for median blur
# blur_type: Gaussian blur or median blur or mixed blur
# use_cuda: use gpu (1) or not (0)
####################################################
original_img = cv2.imread(input_img, 1)
original_img = cv2.resize(original_img, resize_shape)
img = np.float32(original_img)
"""
setinit:
this routine creates local directories and makes topo, qinit, and aux DEMs
to be used by setrun.py
If you have other files, modify this and/or your setrun.py accordingly.
"""
import numpy as np
import dclaw.topotools as gt
import os
#import pylab
#import pdb
cdir = os.path.abspath(os.environ['PWD'])
#---create local directories for data if they do not exist----------
indatadir=os.path.join(cdir,'init_data')
topodir = os.path.join(cdir,indatadir,'topo')
auxdir = os.path.join(cdir,indatadir,'aux')
qinitdir = os.path.join(cdir,indatadir,'qinit')
if not os.path.isdir(indatadir):
execstr = 'mkdir '+indatadir
os.system(execstr)
if not os.path.isdir(topodir):
execstr = 'mkdir '+topodir
os.system(execstr)
if not os.path.isdir(auxdir):
execstr = 'mkdir '+auxdir
os.system(execstr)
if not os.path.isdir(qinitdir):
execstr = 'mkdir '+qinitdir
os.system(execstr)
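# A more compact, behaviour-equivalent alternative (a sketch, not part of the original script):
# os.makedirs creates each directory in pure Python instead of shelling out to `mkdir`.
#   for d in (indatadir, topodir, auxdir, qinitdir):
#       if not os.path.isdir(d):
#           os.makedirs(d)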
#------------------------------------------------------------------------
#---------------- functions for flume geometry to build DEMs ------------
def zero(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=90.0))[0]
Z = np.zeros(np.shape(X))
return Z
def wallzero(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=82.5))[0]
xhopperind = np.where((X[0,:]>=-15.0)&(X[0,:]<=0.0))[0]
Z = np.zeros(np.shape(X))
Z[np.ix_(yind1,xind)] = 1.6
Z[np.ix_(yind2,xind)] = 1.6
Z[np.ix_(yind1,xhopperind)] = 2.5
Z[np.ix_(yind2,xhopperind)] = 2.5
return Z
def zero_backstop(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=90.0))[0]
xbackstopind = np.where(X[0,:]<=-4.0)[0]
ybackstopind = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=2.5))[0]
Z = np.zeros(np.shape(X))
Z[np.ix_(ybackstopind,xbackstopind)] = 2.5
return Z
def wallzero_backstop(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=82.5))[0]
xhopperind = np.where((X[0,:]>=-15.0)&(X[0,:]<=0.0))[0]
Z = np.zeros(np.shape(X))
xbackstopind = np.where(X[0,:]<=-4.0)[0]
ybackstopind = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=2.5))[0]
Z[np.ix_(yind1,xind)] = 1.6
Z[np.ix_(yind2,xind)] = 1.6
Z[np.ix_(yind1,xhopperind)] = 2.5
Z[np.ix_(yind2,xhopperind)] = 2.5
Z[np.ix_(ybackstopind,xbackstopind)] = 2.5
return Z
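# Illustrative evaluation sketch (grid extents and spacing chosen arbitrarily): all of these DEM
# functions expect meshgrid arrays with x varying along axis 1 and y along axis 0, e.g.
#   x = np.linspace(-15.0, 90.0, 1051)
#   y = np.linspace(-1.0, 3.0, 41)
#   X, Y = np.meshgrid(x, y)
#   Z = wallzero_backstop(X, Y)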
def flume_eta(X,Y):
hopperlen = 4.7
hmax = 1.9
hoppertop = 3.3
topangle = 17.0*np.pi/180.0
flumeangle = 31.0*np.pi/180.0
x0 = -hopperlen
x2 = -hmax*np.cos(0.5*np.pi - flumeangle)
x1 = x2 - hoppertop*np.cos(flumeangle-topangle)
x3 = 0.0
y2 = hmax*np.sin(0.5*np.pi - flumeangle)
y1 = y2 - hoppertop*np.sin(flumeangle-topangle)
slope0 = y1/(x1-x0)
slope1 = (y2-y1)/(x2-x1)
slope2 = -y2/(x3-x2)
yind = np.where((Y[:,0]<=2.0)&(Y[:,0]>=0.0))[0]
x0ind = np.where((X[0,:]>=x0)&(X[0,:]<x1))[0]
x1ind = np.where((X[0,:]>=x1)&(X[0,:]<x2))[0]
x2ind = np.where((X[0,:]>=x2)&(X[0,:]<x3))[0]
#pdb.set_trace()
Z=np.zeros(np.shape(X))
Z[np.ix_(yind,x0ind)] = (X[np.ix_(yind,x0ind)]-x0)*slope0
Z[np.ix_(yind,x1ind)] = y1+(X[np.ix_(yind,x1ind)]-x1)*slope1
Z[np.ix_(yind,x2ind)] = -(x3-X[np.ix_(yind,x2ind)])*slope2
# Copyright (c) Scanlon Materials Theory Group
# Distributed under the terms of the MIT License.
"""
Module containing the base class for high-symmetry k-point path determination.
"""
import numpy as np
import seekpath
import spglib
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class Kpath:
r"""Base class providing helper functions for generating k-point paths.
This class should not be used directly. Instead, one of the
:obj:`~sumo.symmetry.brad_crack_kpath.BradCrackKpath`,
:obj:`~sumo.symmetry.seekpath_kpath.SeekpathKpath`, or
:obj:`~sumo.symmetry.custom_kpath.CustomKpath`, subclasses should be used.
The main use of this parent object is for standardisation across the
differing k-point path generation classes.
Args:
structure (:obj:`~pymatgen.core.structure.Structure`): The structure.
symprec (:obj:`float`, optional): The tolerance for determining the
crystal symmetry.
Attributes:
prim (:obj:`~pymatgen.core.structure.Structure`): The standardised
primitive cell structure for the generated k-point path.
conv (:obj:`~pymatgen.core.structure.Structure`): The standardised
conventional cell structure.
"""
def __init__(self, structure, symprec=1e-3):
self.structure = structure
# use sym as a quick way to access the cell data
sym = SpacegroupAnalyzer(structure, symprec=symprec)
self._spg_data = sym.get_symmetry_dataset()
# make primitive and conventional cell from seekpath output
std = spglib.refine_cell(sym._cell, symprec=symprec)
self._seek_data = seekpath.get_path(std)
prim_lattice = self._seek_data["primitive_lattice"]
prim_scaled_positions = self._seek_data["primitive_positions"]
prim_numbers = self._seek_data["primitive_types"]
prim_atoms = [sym._unique_species[i - 1] for i in prim_numbers]
self.prim = Structure(prim_lattice, prim_atoms, prim_scaled_positions)
conv_lattice = self._seek_data["conv_lattice"]
conv_scaled_positions = self._seek_data["conv_positions"]
conv_numbers = self._seek_data["conv_types"]
conv_atoms = [sym._unique_species[i - 1] for i in conv_numbers]
self.conv = Structure(conv_lattice, conv_atoms, conv_scaled_positions)
def correct_structure(self, atol=1e-8):
"""Determine if the structure matches the standard primitive structure.
The standard primitive will be different between seekpath and pymatgen
high-symmetry paths, but this is handled by the specific subclasses.
Args:
atol (:obj:`float`, optional): Absolute tolerance used to compare
the input structure with the primitive standard structure.
Returns:
bool: ``True`` if the structure is the same as the standard
primitive, otherwise ``False``.
"""
return np.allclose(
self.structure.lattice.matrix, self.prim.lattice.matrix, atol=atol
)
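# Illustrative usage sketch (hedged): `Kpath` itself is a base class, so in practice one of the
# subclasses named above would be used; the constructor arguments here are assumed to mirror the base class.
#   kpath = SeekpathKpath(structure, symprec=1e-3)
#   if not kpath.correct_structure():
#       structure = kpath.prim  # switch to the standardised primitive cell
#   kpoints, labels = kpath.get_kpoints(line_density=20)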
def get_kpoints(self, line_density=20, cart_coords=False, phonopy=False):
r"""Return a list of k-points and labels along the high-symmetry path.
The format of the returned data will be different if phonopy is
``True`` or ``False``. This is because phonopy requires the labels and
kpoints to be provided in a different format than kgen.
Adapted from
:obj:`pymatgen.symmetry.bandstructure.HighSymmKpath.get_kpoints`.
Args:
line_density (:obj:`int`, optional): Density of k-points along the
path.
cart_coords (:obj:`bool`, optional): Whether the k-points are
returned in cartesian or reciprocal coordinates. Defaults to
``False`` (fractional coordinates).
phonopy (:obj:`bool`, optional): Format the k-points and labels for
use with phonopy. Defaults to ``False``.
Returns:
tuple: A :obj:`tuple` of the k-points along the high-symmetry path,
and k-point labels. Returned as ``(kpoints, labels)``.
If ``phonopy == False``, then:
* ``kpoints`` is a :obj:`numpy.ndarray` of the k-point
coordinates along the high-symmetry path. For example::
[[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0], [0.5, 0, 0.25],
[0.5, 0, 0.5]]
* ``labels`` is a :obj:`list` of the high symmetry labels for
each k-point (will be an empty :obj:`str` if the k-point has
no label). For example::
['\Gamma', '', 'X', '', 'Y']
If ``phonopy == True``, then:
* ``kpoints`` is a :obj:`list` of :obj:`numpy.ndarray`
containing the k-points for each branch of the band
structure. This means that the first and last k-points of a
particular branch may be repeated. For example::
[[[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0]],
[[0.5, 0, 0], [0.5, 0, 0.25], [0.5, 0, 0.5]]]
* ``labels`` is a :obj:`list` of the high symmetry labels.
For example::
['\Gamma', 'X', 'Y']
"""
list_k_points = []
sym_point_labels = []
recip_lattice = self.structure.lattice.reciprocal_lattice
for b in self.path:
for i in range(1, len(b)):
start = np.array(self.kpoints[b[i - 1]])
from EP_N_Env import *
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable  # used below for the Variable() wrappers (may also be re-exported by EP_N_Env)
from pdb import set_trace as bp
Tensor = torch.FloatTensor
def get_input_optimizer(input_array):
# this line to show that input is a parameter that requires a gradient
optimizer = optim.Adam([input_array.requires_grad_()], lr=8e-2)
return optimizer
class Ave_D_Loss(nn.Module):
def __init__(self, net, loads, N_node_in, N_node_out):
super(Ave_D_Loss, self).__init__()
self.net = net.eval()
# loads should be 1 X N_node
self.load_mtx = loads
self.load_mtx.requires_grad = False
self.N_node_in = N_node_in
self.N_node_out = N_node_out
def forward(self, in_x):
# X source X dest
# loads dest
x_portion = torch.nn.functional.softmax(in_x, dim=2)
x_final = x_portion
ave_delay = -1*self.net(self.load_mtx.view(-1).unsqueeze(0), x_portion.view(-1).unsqueeze(0))
return x_final, ave_delay
class Critic(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, int(hidden_size/2))
self.linear3 = nn.Linear(int(hidden_size/2), output_size)
def forward(self, stt, act):
x = torch.cat([stt, act], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
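# Illustrative shape sketch (batch size B and dimensions are assumed): the critic concatenates state
# and action along dim=1, so both inputs must be flattened to 2-D first:
#   critic = Critic(input_size=s_dim + a_dim, hidden_size=512, output_size=1)
#   q_values = critic(states.view(B, -1), actions.view(B, -1))  # -> shape (B, 1)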
class Agent(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
s_dim = self.env.observation_shape
a_dim = self.env.action_shape
self.N_in = self.env.N_node_in
self.N_out = self.env.N_node_out
self.N_mid = self.env.N_node_mid
self.N_init = 1000
self.critic = Critic(a_dim+s_dim, 256*2, 1)
self.critic_target = Critic(a_dim+s_dim, 256*2, 1)
self.critic_optim = optim.Adam(self.critic.parameters(), lr = self.critic_lr)
self.buffer = []
self.critic_target.load_state_dict(self.critic.state_dict())
def act(self, s0):
s0 = torch.tensor(s0, dtype=torch.float).unsqueeze(0)
load_temp = Variable(Tensor(s0), requires_grad=False)
x_init = Tensor(np.random.normal(0.0, 1.0, (self.N_init, self.N_in*self.N_out, self.N_mid)))
x_init = torch.nn.functional.softmax(x_init, dim=2)
d_temp_a = -1*self.critic(load_temp.unsqueeze(0).repeat(self.N_init, 1, 1).view(self.N_init, -1), x_init.view(self.N_init, -1))
D_loss_i = Ave_D_Loss(self.critic_target, load_temp, self.N_in, self.N_out)
init_n_min = torch.argmin(d_temp_a, dim=0)
x_chosen = x_init[init_n_min]
x = Variable(x_chosen, requires_grad = True)
optimizer = get_input_optimizer(x)
opt_step = 0
while opt_step < 100:
opt_step = opt_step + 1
def closure():
optimizer.zero_grad()
x_temp, d_temp = D_loss_i(x)
d_temp.backward()
return d_temp  # return the loss so optimizer.step(closure) can re-use it
optimizer.step(closure)
x2 = torch.nn.functional.softmax(x, dim=2)
x2 = x2.detach().numpy()
return x2
def put(self, *transition):
if len(self.buffer)== self.capacity:
self.buffer.pop(0)
self.buffer.append(transition)
def clear(self):
self.buffer.clear()
def learn(self):
if len(self.buffer) < self.batch_size:
return
samples = random.sample(self.buffer, self.batch_size)
s0, a0, r1, s1 = zip(*samples)
s0 = torch.tensor(s0, dtype=torch.float)
s0 = s0.unsqueeze(1)
s0 = s0.view(self.batch_size, -1)
a0 = torch.tensor(a0, dtype=torch.float).view(self.batch_size,-1)
r1 = torch.tensor(r1, dtype=torch.float).view(self.batch_size,-1)
s1 = torch.tensor(s1, dtype=torch.float)
def critic_learn():
y_pred = self.critic(s0, a0)
loss_fn = nn.MSELoss()
loss = loss_fn(y_pred, r1)
self.critic_optim.zero_grad()
loss.backward()
self.critic_optim.step()
def soft_update(net_target, net, tau):
for target_param, param in zip(net_target.parameters(), net.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
critic_learn()
soft_update(self.critic_target, self.critic, self.tau)
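# Illustrative construction sketch: Agent takes its hyper-parameters as keyword arguments and, as the
# attribute accesses above show, expects at least `env`, `critic_lr`, `capacity`, `batch_size` and
# `tau` (the values below are placeholders, not from the original source):
#   agent = Agent(env=env, critic_lr=1e-3, capacity=10000, batch_size=64, tau=0.02)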
def Run_Simulation(rand_seed_n):
rep_time = 1
N_in = 4
N_out = 16
N_mid = 8
N_pair = 4
env = EP_Env(N_in, N_out, N_mid, N_pair, rep_time, rand_seed_n*10)
s0 = env.initial_state()
s_mean = np.mean(s0)
s_std = np.std(s0)
s_max = np.max(s0)
RL_delays = np.zeros((10000, N_pair))
O_delays = np.zeros((10000, N_pair))
E_delays = np.zeros((10000, N_pair))
#!/usr/bin/env python3
import cv2
import numpy as np
import time
def get_triangulation_indices(points):
"""Get indices triples for every triangle
"""
# Bounding rectangle
bounding_rect = (*points.min(axis=0), *points.max(axis=0))
# Triangulate all points
subdiv = cv2.Subdiv2D(bounding_rect)
#import pdb;pdb.set_trace()
pt_list = [(int(p[0]),int(p[1])) for p in points]
subdiv.insert(pt_list)
# Iterate over all triangles
for x1, y1, x2, y2, x3, y3 in subdiv.getTriangleList():
# Get index of all points
yield [(points==point).all(axis=1).nonzero()[0][0] for point in [(x1,y1), (x2,y2), (x3,y3)]]
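# Illustrative usage sketch (the point set is assumed to be an (N, 2) array of pixel coordinates):
#   points = np.array([[0, 0], [100, 0], [100, 100], [0, 100], [50, 50]], dtype=np.float32)
#   for i1, i2, i3 in get_triangulation_indices(points):
#       triangle = points[[i1, i2, i3]].astype(np.int32)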
def crop_to_triangle(img, triangle):
"""Crop image to triangle
"""
# Get bounding rectangle
bounding_rect = cv2.boundingRect(triangle)
# Crop image to bounding box
img_cropped = img[bounding_rect[1]:bounding_rect[1] + bounding_rect[3],
bounding_rect[0]:bounding_rect[0] + bounding_rect[2]]
# Move triangle to coordinates in cropped image
triangle_cropped = [(point[0]-bounding_rect[0], point[1]-bounding_rect[1]) for point in triangle]
return triangle_cropped, img_cropped
def transform(src_img, src_points, dst_img, dst_points):
"""Transforms source image to target image, overwriting the target image.
"""
src_points = np.array(src_points, np.int32)
dst_points = np.array(dst_points, np.int32)
import numpy as np
from EOkit.EOkit import lib
from EOkit.array_utils import check_type, check_contig
from cffi import FFI
ffi = FFI()
def single_sav_golay(y_input, window_size, order, deriv=0, delta=1):
"""Run a single Savitzky-golay filter on 1D data.
The Savitzky-golay smoother fits a polynomial to sliding windows of data
using a least squares fit. The implementation I have used is similar to that
found in the SciPy cookbook: https://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.
Parameters
----------
y_input : ndarray of type float, size (N)
The inputs that are to be smoothed.
window_size : int
The size of the sliding window. Generally, the larger the window of data
points, the smoother the resultant data.
order : int
Order of polynomial to fit the data with. Needs to be less than
window_size - 1.
deriv : int, optional
Order of the derivative to smooth, by default 0
delta : int, optional
The spacing of the samples to which the filter is applied, by default 1
Returns
-------
ndarray of type float, size (N)
Smoothed data at y inputs.
Examples
--------
Below is a simple example of how to use the Savitzky-golay smoother.
>>> data_len = 1000
>>> vci = (np.sin(np.arange(0, data_len, 1., dtype=float))
...        + np.random.standard_normal(data_len) * 2)
>>> rust_smoothed_data = sav_golay.single_sav_golay(vci, 7, 2, 0, 1)
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
"""
# TODO! Condense all this stuff into a function shared across the smoothers.
data_len = len(y_input)
result = np.empty(data_len, dtype=np.float64)
result = check_contig(result)
y_input = check_contig(y_input)
y_input_ptr = ffi.cast("double *", y_input.ctypes.data)
result_ptr = ffi.cast("double *", result.ctypes.data)
lib.rust_single_sav_golay(
y_input_ptr, result_ptr, result.size, window_size, order, deriv, delta
)
return result
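# Stand-alone usage sketch (mirrors the docstring example above; the noisy input is synthetic):
#   data_len = 1000
#   noisy = np.sin(np.arange(0, data_len, 1., dtype=float)) + np.random.standard_normal(data_len) * 2
#   smoothed = single_sav_golay(noisy, window_size=7, order=2)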
def multiple_sav_golays(y_inputs, window_size, order, deriv=0, delta=1, n_threads=-1):
"""Run many Savitzky-golay smoothers on 1D data in a multithread manner.
This runs an identical algorithm to the single_sav_golay function. However,
this functions takes a list of y_inputs. Rust is then used under the hood
to multithread each Savitzky-golay smoother as a task leading to faster
computation for pixel-based problems!
I have used a list here as apposed to a 2-D array so that arrays of different
lengths can be supplied.
Parameters
----------
y_inputs : list of ndarrays of type float, size (N)
A list of numpy arrays containing the values to be smoothed.
window_size : int
The size of the sliding window. Generally, the larger the window of data
points, the smoother the resultant data.
order : int
Order of polynomial to fit the data with. Needs to be less than
window_size - 1.
deriv : int, optional
Order of the derivative to smooth, by default 0
delta : int, optional
The spacing of the samples to which the filter is applied, by default 1
n_threads : int, optional
Amount of worker threads spawned to complete the task. The default is -1
which uses all logical processor cores. To tone this down, use something
between 1 and the number of processor cores you have. Setting this value
to a number larger than the amount of logical cores you have will most
likely degreade performance, by default -1
Returns
-------
list of ndarrays of type float, size (N)
A list of numpy arrays containing the smoothed data at y_inputs.
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
"""
index_runner = 0
start_indices = [0]
for y_input in y_inputs[:-1]:
length_of_input = len(y_input)
index_runner += length_of_input
start_indices.append(index_runner)
start_indices = np.array(start_indices, dtype=np.uint64)
y_input_array = np.concatenate(y_inputs)
import os
import pytest
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics import TopKCategoricalAccuracy
def test_zero_div():
acc = TopKCategoricalAccuracy(2)
with pytest.raises(
NotComputableError, match=r"TopKCategoricalAccuracy must have at least one example before it can be computed"
):
acc.compute()
def test_compute():
acc = TopKCategoricalAccuracy(2)
y_pred = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 0.5
acc.reset()
y_pred = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
y = torch.ones(2).long()
acc.update((y_pred, y))
assert isinstance(acc.compute(), float)
assert acc.compute() == 1.0
def top_k_accuracy(y_true, y_pred, k=5, normalize=True):
import numpy as np
# Taken from
# https://github.com/scikit-learn/scikit-learn/blob/4685cb5c50629aba4429f6701585f82fc3eee5f7/
# sklearn/metrics/classification.py#L187
if len(y_true.shape) == 2:
y_true = np.argmax(y_true, axis=1)