import numpy as np
from math import pi, sin, cos
from numpy import array
from shapely import wkt
from shapely.geometry import Polygon, Point, mapping
import matplotlib.pyplot as plt
class exercise2:
def __init__(self, start, goal, center, length, is_coord=False):
self.center = center
self.length = length
self.goal = goal
self.start = start
self.radius = [((2*self.length**2)**.5)/2, ((2*self.length**2)**.5)/2]
self.epsi = 0.1
self.is_coord = is_coord
def potential_func(self):
# fig = plt.figure()
pot_fun = float('inf')
pot_att = float('inf')
scale = .2
n = .5
iters = 0
step_size = .2
d_goal_start = 1
d_q_start = 0.5
q = [self.start]
prev_nor = 0
prev_q = self.start
while np.linalg.norm(array(q[iters])-array(self.goal)) > self.epsi:
if self.is_coord:
distance_from_obs_edge, edge_point = self.dis_to_obs(
q[iters])
else:
distance_from_obs_edge, edge_point = self.get_dis_to_obs(
q[iters])
if np.linalg.norm(array(q[iters])-array(self.goal)) <= d_goal_start:
pot_att = scale * (array(q[iters])-array(self.goal))
else:
pot_att = d_goal_start * scale * \
(array(q[iters]) - array(self.goal)) / \
np.linalg.norm(array(q[iters])-array(self.goal))
pot_repu_i = 0
pot_repu = 0
for i in range(0, len(self.center)):
if distance_from_obs_edge[i] <= d_q_start:
num = 1/d_q_start-1/distance_from_obs_edge[i]
grad = (array(q[iters])-array(edge_point[i])) / \
array(distance_from_obs_edge[i])
d_sq = 1/distance_from_obs_edge[i]**2
pot_repu_i = n*num*grad*d_sq
else:
pot_repu_i = 0
pot_repu += pot_repu_i
pot_fun = pot_att+pot_repu
# print(prev_q, q[iters])
# plt.scatter(q[iters][0], q[iters][1])
# plt.pause(0.001)
if prev_q[0] == q[iters][0] and prev_q[1] == q[iters][1]:
q[iters][0] = q[iters][0]+step_size/100
# print(q)
q += [q[iters]-step_size*pot_fun]
# print(q)
prev_q = q[iters]
iters += 1
return q, pot_fun
def get_dis_to_obs(self, position):
num_obs = len(self.center)
distance_from_obs_edge = []
edge_point = []
for i in range(0, num_obs):
obs_position = self.center[i]
distance_from_obs_edge.append(np.linalg.norm(
                np.array(obs_position) - np.array(position)))
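# --- Illustrative usage sketch (not part of the original file) ---
# A minimal driver for the planner above, assuming get_dis_to_obs (cut off
# in this excerpt) returns per-obstacle (distance, edge_point) lists. All
# start/goal/obstacle values below are made up for illustration.
def _demo_exercise2():
    planner = exercise2(start=[0.0, 0.0], goal=[10.0, 10.0],
                        center=[[5.0, 5.0]], length=1.0)
    path, final_gradient = planner.potential_func()
    print('steps taken:', len(path))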
# -*- coding: utf-8 -*-
"""
=============================================================================
Title : Generate and add noise to incoming data
Project : Simulation environment for BckTrk app
File : AWGN.py
-----------------------------------------------------------------------------
Description :
References :
-
-----------------------------------------------------------------------------
Revisions :
Date Version Name Description
26-May-2018 1.0 Rami File created
=============================================================================
"""
import numpy as np
import Navigation.Coordinates as cord
from Helper_functions.framework_error import CFrameworkError
from Helper_functions.framework_error import CErrorTypes
# Bring the logger
import logging
logger = logging.getLogger('BckTrk')
# the main processing function
def noise_generator(params, positions_wm, noise_level):
data_obj = cAWGN(params, noise_level)
data_obj.generate_noisy_signal_dist()
data_obj.add_noise_signal(positions_wm)
noise_sq = np.sqrt(data_obj.noise[0, :] ** 2 + data_obj.noise[1, :] ** 2)
return data_obj.m_noisy_positions_wm, data_obj.m_noisy_positions_latlon, noise_sq
class cAWGN:
# Constructor
def __init__(self, struct, noise_level):
logger.debug("Initializing cAWGN")
self.m_acquisition_length = struct['acquisition_length']
self.m_noise_std = struct['noise_std_meter']
self.m_noisy_positions_latlon = np.zeros((2, self.m_acquisition_length))
self.m_noisy_positions_wm = np.zeros((2, self.m_acquisition_length))
self.m_noise_level = noise_level
self.noise = np.transpose(
np.random.multivariate_normal([0, 0], [[1 ** 2, 0], [0, 1 ** 2]], self.m_acquisition_length))
        self.noise_dist = np.zeros(self.m_acquisition_length)
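# --- Illustrative usage sketch (not part of the original file) ---
# A minimal sketch of the call pattern for noise_generator, assuming
# positions_wm is a (2, N) array of web-mercator coordinates and params
# carries the two keys read by the constructor; the remaining cAWGN methods
# are cut off in this excerpt, so this only shows the intended interface.
def _demo_noise_generator():
    params = {'acquisition_length': 100, 'noise_std_meter': 5.0}
    positions_wm = np.zeros((2, params['acquisition_length']))
    noisy_wm, noisy_latlon, noise_mag = noise_generator(params, positions_wm,
                                                        noise_level=1.0)
    print(noise_mag.shape)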
from unittest.mock import patch
import datetime as dt
import os
import unittest
from nose.tools import assert_equals, assert_true, assert_false, assert_raises
import numpy as np
import numpy.testing as npt
import pandas as pd
import seaice.data as sid
import seaice.data.api as api
import seaice.data.errors as e
import seaice.data.getter as getter
import seaice.data.gridset_filters as gf
import seaice.nasateam as nt
TEST_ROOT = [os.path.join(
os.path.dirname(__file__),
os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,
'test_data',
'seaice.data'
)]
class Test_concentration_daily(unittest.TestCase):
def test_concentration_daily(self):
result = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,
month=1, day=7, search_paths=TEST_ROOT)
actual = result['data'].shape
rows, cols = nt.NORTH['shape']
expected = (rows, cols)
assert_false(np.all(result['data'] == 255.))
assert_equals(expected, actual)
def test_missing_day_returns_empty_grid(self):
result = sid.concentration_daily(hemisphere=nt.NORTH, year=2002,
month=1, day=1, search_paths=TEST_ROOT)
actual = result['data'].shape
rows, cols = nt.NORTH['shape']
expected = (rows, cols)
assert_true(np.all(result['data'] == 255.))
assert_equals(expected, actual)
def test_missing_day_raises_when_asked_to(self):
assert_raises(e.SeaIceDataNoData, sid.concentration_daily,
hemisphere=nt.NORTH, year=2002,
month=1, day=1, search_paths=TEST_ROOT,
allow_empty_gridset=False)
@patch('seaice.data.getter._concentration_gridset_by_filelist')
@patch('seaice.datastore.get_bad_days_for_hemisphere')
@patch('seaice.data.locator.daily_file_path')
def test_with_bad_date_and_empty_gridset_not_allowed(self,
mock_daily_file_path,
mock_get_bad_days_for_hemisphere,
mock__concentration_gridset_by_filelist):
files = ['doesnt_matter1.bin',
                 'doesnt_matter2.bin',
'doesnt_matter3.bin']
mock_daily_file_path.return_value = files
shape = (5, 5, 2)
missing = 255
mock__concentration_gridset_by_filelist.return_value = {
'data': np.full(shape, missing, dtype=np.int),
'metadata': {
'period_index': pd.period_range('1980-10-21', '1980-10-23', freq='D'),
'missing_value': 255,
'files': files
}
}
bad_dates = pd.period_range('1980-10-20', '1980-10-27', freq='D')
mock_get_bad_days_for_hemisphere.return_value = bad_dates
with self.assertRaises(e.SeaIceDataNoData):
sid.concentration_daily(nt.NORTH,
1980, 10, 25,
['/who/cares'],
interpolation_radius=0,
allow_empty_gridset=False,
allow_bad_dates=False)
@patch('seaice.datastore.get_bad_days_for_hemisphere')
@patch('seaice.data.gridset_filters._interpolate_missing')
@patch('seaice.data.getter._concentration_gridset_by_filelist')
@patch('seaice.data.locator.daily_file_path')
def test_daily_multiple_files_interpolated(self, mock_daily_file_path,
_mockgridset_by_filelist, mock__interpolate_missing,
mock_get_bad_days_for_hemisphere):
mock_get_bad_days_for_hemisphere.return_value = []
files = ['nt_20150831_n07_v1.1_s.bin',
'nt_20150901_n07_v1.1_s.bin',
'nt_20150902_n07_v1.1_s.bin']
gridset = {'data': np.full((2, 2, 3), 2, dtype=np.int),
'metadata': {'files': files,
'period_index': pd.period_range(start='2015-08-31',
end='2015-09-02',
freq='D')}}
mock_daily_file_path.return_value = files
_mockgridset_by_filelist.return_value = gridset
interpolated = np.full((2, 2), 2, dtype=np.int)
mock__interpolate_missing.return_value = interpolated
hemisphere = nt.NORTH
search_paths = ['/anyroot']
# act
sid.concentration_daily(hemisphere, 2015, 9, 1, search_paths, interpolation_radius=1)
# assert
getter._concentration_gridset_by_filelist.assert_called_with(files)
npt.assert_array_equal(mock__interpolate_missing.call_args[0][0], interpolated)
npt.assert_array_equal(mock__interpolate_missing.call_args[0][1],
np.full((2, 2, 2), 2, dtype=np.int))
@patch('seaice.datastore.get_bad_days_for_hemisphere')
@patch('seaice.data.gridset_filters._interpolate_missing')
@patch('seaice.data.getter._concentration_gridset_by_filelist')
@patch('seaice.data.locator.daily_file_path')
def test_no_interpolation_needed_only_includes_file_for_date(self,
mock_daily_file_path,
mock__gridset_by_filelist,
mock__interpolate_missing,
mock_get_bad_days_for_hemisphere):
mock_get_bad_days_for_hemisphere.return_value = []
        files = ['nt_20111231_n07_v1.1_s.bin',
'nt_20120101_n07_v1.1_s.bin',
'nt_20120102_n07_v1.1_s.bin']
gridset = {'data': np.full((2, 2, 3), 4, dtype=np.int),
'metadata': {'files': files,
'period_index': pd.period_range(start='2011-12-31',
periods=3,
freq='D')}}
mock_daily_file_path.return_value = files
mock__gridset_by_filelist.return_value = gridset
mock__interpolate_missing.return_value = np.full((2, 2), 4, dtype=np.int)
interpolation_radius = 1
nt_hemi = nt.NORTH
actual_gridset = sid.concentration_daily(nt_hemi,
2012,
1,
1,
['/anypaths'],
interpolation_radius=interpolation_radius)
actual = actual_gridset['metadata']['files']
expected = ['nt_20120101_n07_v1.1_s.bin']
self.assertEqual(actual, expected)
class Test_concentration_daily_average_over_date_range(unittest.TestCase):
def test_concentration_daily_average_over_date_range(self):
date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])
result = sid.concentration_daily_average_over_date_range('N',
date_range,
search_paths=TEST_ROOT)
actual = result['data'].shape
rows, cols = nt.NORTH['shape']
expected = (rows, cols)
assert_false(np.all(result['data'] == 255.))
assert_equals(expected, actual)
def test_different_from_each_day(self):
date_range = pd.DatetimeIndex(['2001-01-06', '2001-01-07'])
first = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,
month=1, day=6, search_paths=TEST_ROOT)
last = sid.concentration_daily(hemisphere=nt.NORTH, year=2001,
month=1, day=7, search_paths=TEST_ROOT)
average = sid.concentration_daily_average_over_date_range('N',
date_range,
search_paths=TEST_ROOT)
self.assertFalse(np.all(average['data'] == first['data']))
self.assertFalse(np.all(average['data'] == last['data']))
class Test_concentration_daily___failed_qa_logic(unittest.TestCase):
def setUp(self):
self.day_before_grid = np.full(nt.NORTH['shape'], 1, dtype=np.int)
target_grid = np.full(nt.NORTH['shape'], 2, dtype=np.int)
target_grid[0:3, 0:3] = nt.FLAGS['missing']
self.target_grid = target_grid.copy()
self.day_after_grid = np.full(nt.NORTH['shape'], 11, dtype=np.int)
self.cube = np.dstack([self.day_before_grid, target_grid, self.day_after_grid])
target_grid[0:3, 0:3] = (1 + 11) / 2
self.interpolated_grid = target_grid.copy()
self.empty_grid = np.full(nt.NORTH['shape'], nt.FLAGS['missing'], dtype=np.int)
self.target_date = dt.date(1980, 10, 25)
self.period_index = pd.period_range(start='1980-10-24', end='1980-10-26', freq='D')
self.file_list = ['nt_19801024_n07_v1.1_n.bin',
'nt_19801025_n07_v1.1_n.bin',
'nt_19801026_n07_v1.1_n.bin']
@patch('seaice.data.getter._concentration_gridset_by_filelist')
@patch('seaice.data.locator.daily_file_path')
@patch('seaice.datastore.get_bad_days_for_hemisphere')
def test_good_day_interpolates_with_good_days_with_allow_bad_dates_false_and_empty_false(
self,
mock_get_bad_days_for_hemisphere,
mock_daily_file_path,
mock__concentration_gridset_by_filelist):
allow_empty_gridset = False
allow_bad_dates = False
interpolation_radius = 1
mock_get_bad_days_for_hemisphere.return_value = []
file_list = self.file_list
mock_daily_file_path.return_value = file_list
gridset = {'data': self.cube,
'metadata': {'files': file_list,
'missing_value': nt.FLAGS['missing'],
'period_index': self.period_index,
'valid_data_range': (0, 100)}}
mock__concentration_gridset_by_filelist.return_value = gridset
actual = sid.concentration_daily(nt.NORTH,
self.target_date.year,
self.target_date.month,
self.target_date.day,
['/who/cares'],
interpolation_radius=interpolation_radius,
allow_empty_gridset=allow_empty_gridset,
allow_bad_dates=allow_bad_dates)
expected_grid = self.interpolated_grid
        npt.assert_array_equal(actual['data'], expected_grid)
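# --- Illustrative note (not part of the original file) ---
# The stacked @patch decorators in these tests inject mocks bottom-up: the
# decorator nearest the test method supplies the first mock argument after
# self. A minimal standalone sketch of the same pattern:
#
#     @patch('seaice.data.getter._concentration_gridset_by_filelist')  # -> 2nd arg
#     @patch('seaice.data.locator.daily_file_path')                    # -> 1st arg
#     def test_example(self, mock_daily_file_path, mock_gridset_by_filelist):
#         ...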
import numpy as np
class ENV_net():
def __init__(self,SNR_mat, UE, BS, Bh):
self.UE = UE
self.BS = BS
self.episode = 0
self.step = 0
self.max_level = 500
self.power_default = 37
self.renew_max = 60
self.renew_min = 37
self.tx_dBm = 30
self.tx_w = np.power(10, self.tx_dBm/10)/1000
self.delta = 2.6
self.grid_power_min = 200
self.grid_power_max = 200
self.QoS_pool = np.array([0.192,2.22,1.5,0.6,4.44]) ## Mbps [Audio, Video, Image, Web_bro, Email]
self.SNR_mat = SNR_mat
self.renew = (np.arange(self.BS) != self.BS-1).astype(int) #np.array([1,1,1,0])
self.grid_power = self.grid_power_min + (self.grid_power_max-self.grid_power_min)*np.random.uniform(size= [self.BS])
self.grid_power = self.grid_power * (1-self.renew)
self.Backhaul_lim = Bh
self.Backhaul = self.Backhaul_lim + (50-self.Backhaul_lim )*(1-self.renew)
self.RB = 100
self.BW = 2e+7
self.action_size_PC = self.BS
self.action_size_UA = 2*self.BS
self.reward_size_PC = 2 * self.BS #3*self.BS
self.reward_size_UA = 4*self.BS #+ 2*self.UE
self.state_size_PC = self.UE*self.BS + 5*self.BS + self.UE
self.state_size_UA = self.UE*self.BS + 2* self.BS + self.UE
self.QoS_penalty = 4.0
self.Backhaul_penalty =100
def reset(self,is_test=False):
if is_test:
self.UA_set = np.arange(self.UE)
self.H_mat = self.SNR_mat[self.episode, :, :,:].copy()
else:
self.UA_set = np.random.permutation(self.SNR_mat.shape[2])[0:self.UE]
self.H_mat = self.SNR_mat[np.mod(self.episode+int(np.random.uniform(0,self.SNR_mat.shape[0])) ,self.SNR_mat.shape[0]), :, :,:].copy()
self.H_mat = self.H_mat[:,self.UA_set,:].copy()
H = self.H_mat[0,:,:].copy()
UA = np.zeros([self.UE]).astype(int)
for i in range(self.UE):
BS_ind = np.mod(i, self.BS)
UE_ind = np.argmax(H[:,BS_ind])
H[UE_ind,:] = -1.0
UA[BS_ind * int(self.UE/self.BS) + int(i/self.BS)] = UE_ind
self.H_mat = self.H_mat[:,UA,:].copy()
self.H = self.H_mat[0, :,:].copy()
self.QoS = np.random.choice(self.QoS_pool.shape[0],[self.UE,1])+0.0
for i in range(self.QoS_pool.shape[0]):
self.QoS[self.QoS == i] = self.QoS_pool[i]
self.QoS[self.QoS==2.22] = (0.6 + (1.4-0.6)*np.random.uniform(size= [np.sum(self.QoS==2.22)]) )
self.QoS[self.QoS==4.44] = (2.0 + (6.0-2.0)*np.random.uniform(size= [np.sum(self.QoS==4.44)]) )
self.b_level = 100 * self.renew
self.res_source =(self.renew_min+ (self.renew_max - self.renew_min)*np.random.uniform(size = [self.BS]))*self.renew
self.state_PC = np.concatenate([(self.H*(self.b_level + self.grid_power - self.power_default)/260).reshape([1,-1])/2000.0, self.QoS.reshape([1,-1]), (np.maximum(self.b_level+self.grid_power - self.power_default,0.0).reshape([1,-1])/260.0), self.b_level.reshape([1,-1])/260, self.grid_power.reshape([1,-1])/260, (self.res_source).reshape([1,-1])/260,self.Backhaul.reshape([1,-1])/10.0], axis=1)
def get_state_UA(self):
self.P = np.clip( (self.renew * (0.01 + 0.69 * (self.action[0,0:self.BS]+1)/2) + (1-self.renew) * (0.01 + 0.99 * (self.action[0,0:self.BS]+1)/2))*(self.b_level + self.grid_power -self.power_default)/self.delta/self.RB,
0, 1).reshape([1,-1])
SNR = self.H * self.P
SINR = SNR/ ( 1+ np.tile(np.sum(SNR,axis=1,keepdims=True),[1, self.BS]) - SNR)
self.Rate = np.log2(1+SINR)*100*0.18 + 0.001
self.state_UA = np.concatenate([np.max(self.Rate,axis=0).reshape([1,-1]),-np.log(1+self.QoS/self.Rate).reshape([1,-1]),self.Backhaul.reshape([1,-1]), self.QoS.reshape([1,-1])],axis=1)
def get_X(self, Rate, QoS, Backhaul, mu, rho, is_print=False):
mu = np.expand_dims(mu,axis=1)
rho = np.expand_dims(rho,axis=1)
Backhaul = np.expand_dims(Backhaul,axis=1)
X = (np.expand_dims(np.argmax(Rate,axis=2),2) == np.arange(self.BS).reshape([1,1,self.BS]))+0.0
lamb = np.max(Rate*X,axis=1,keepdims=True)
count = 0
while 1:
lamb_old = lamb.copy()
if X.shape[0] > 0:
UE_order = np.random.permutation(self.UE)
else:
UE_order = np.argsort(np.min(np.maximum(QoS/Rate, QoS/Backhaul)[0,:,:],axis=1))
for UE_ind in UE_order:
X[:,UE_ind,:] = 0
lamb = np.max(Rate*X,axis=1,keepdims=True)
UE_opt = -(1+mu)*QoS * lamb/Rate - rho * QoS
## Tie Break
UE_select = np.argmax(UE_opt[:,UE_ind,:],axis=1)[0]
BB= -UE_opt[0,UE_ind,:].copy()
indices = np.argsort(BB,axis=0)
R_remain = 1-np.sum(np.sum(QoS/Rate*X,axis=1),axis=0)
B_remain = Backhaul[0,0,:] - np.sum(np.sum(QoS*X,axis=1),axis=0)
if R_remain[UE_select] < QoS[0,UE_ind,0]/Rate[0,UE_ind,UE_select] or B_remain[UE_select] < QoS[0,UE_ind,0]:
X[:,UE_ind,:] = 0.0
X[:,UE_ind,:] = (UE_select == np.arange(self.BS).reshape([1,self.BS]))+0.0
Y = self.get_Y(X[0,:,:],mu[0,:,:],rho[0,:,:])
reward_org = np.sum(self.Rate * X[0,:,:] * Y)/40 - np.sum(self.QoS > np.sum(self.Rate * X[0,:,:]*Y,axis=1,keepdims=True)+1e-7)/self.UE * 40
for B_ind in indices:
if abs(np.log(abs(BB[UE_select] / BB[B_ind])))<0.5:
X[:,UE_ind,:] = 0
X[:,UE_ind,:] = (B_ind == np.arange(self.BS).reshape([1,self.BS]))+0.0
Y=self.get_Y(X[0,:,:],mu[0,:,:],rho[0,:,:])
reward_new = np.sum(self.Rate * X[0,:,:] * Y)/40 - np.sum(self.QoS > np.sum(self.Rate * X[0,:,:]*Y,axis=1,keepdims=True)+1e-7)/self.UE * 40
if reward_new >reward_org:
UE_select = B_ind
break
X[:,UE_ind,:] = 0.0
X[:,UE_ind,:] = (UE_select == np.arange(self.BS).reshape([1,self.BS]))+0.0
lamb = np.max(Rate*X,axis=1,keepdims=True)
if np.sum(abs(lamb_old-lamb)>1e-7) == 0:
count = count+1
if count > 1:
break
Y = QoS / Rate * X #[Batch, UE, BS]
Y_opt = Y.copy()
Y_opt[Y_opt==0] = 9999999.9
Y_s = np.sort(Y_opt,axis=1)
QoS_tile = np.tile(QoS, [1,1,self.BS])
ind = np.argsort(Y_opt,axis=1)
QoS_s = np.take_along_axis(QoS_tile, ind, axis=1)
fail_rate = 1-np.sum((np.cumsum(Y_s,axis=1) < 1) * (np.cumsum(QoS_s,axis=1)<Backhaul) )/self.UE
return X.copy(), fail_rate
def get_Y(self,X,mu,rho):
Z = (np.argmax(self.Rate*X,axis=0).reshape([1,-1]) == np.arange(self.UE).reshape([-1,1]))* X+0.0
Y = self.QoS/(self.Rate)*X
for BS_ind in range(self.BS):
            while np.sum(Y[:, BS_ind]) > 1:  # condition assumed: total RB fraction on this BS exceeds the unit budget
                pass  # allocation-rebalancing body is cut off in this excerpt
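# --- Illustrative usage sketch (not part of the original file) ---
# Constructing the environment, assuming SNR_mat is shaped
# (episodes, time, candidate_UEs, BS) as implied by reset()'s indexing;
# all sizes below are made up for illustration.
def _demo_env_net():
    rng = np.random.default_rng(0)
    SNR_mat = rng.uniform(0.1, 10.0, size=(5, 50, 32, 4))
    env = ENV_net(SNR_mat, UE=8, BS=4, Bh=5.0)
    env.reset()
    print(env.state_PC.shape)  # (1, state_size_PC)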
"""
Copyright (c) 2018, <NAME>, <NAME>, <NAME>
https://github.com/spagliarini
Mnemosyne team, Inria, Bordeaux, France
https://team.inria.fr/mnemosyne/fr/
Distributed under the BSD-2-Clause License
PLOT: comparison between different normalizations
(Fig. 4)
"""
import os
import numpy as np
import matplotlib.pyplot as plt
csfont = {'fontname':'Times New Roman'}
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//AllNormalizations//NormalizedAN') #or NormalizedMN
#load distance files
err_wn=np.load('Distance_simple.npy')
err_2avrg=np.load('Distance_2avrg.npy')
err_2norm=np.load('Distance_2norm.npy')
err_3avrg=np.load('Distance_3avrg.npy')
err_3norm=np.load('Distance_3norm.npy')
err_4avrg=np.load('Distance_4avrg.npy')
err_4norm=np.load('Distance_4norm.npy')
err_5avrg=np.load('Distance_5avrg.npy')
err_5norm=np.load('Distance_5norm.npy')
#definition of the end_time
end_time=err_wn.shape[1]
#smooth average distance over all the simulations
mean_err_wn=np.zeros((end_time,1))
mean_err_2avrg=np.zeros((end_time,1))
mean_err_2norm=np.zeros((end_time,1))
mean_err_3avrg=np.zeros((end_time,1))
mean_err_3norm=np.zeros((end_time,1))
mean_err_4avrg=np.zeros((end_time,1))
mean_err_4norm=np.zeros((end_time,1))
mean_err_5avrg=np.zeros((end_time,1))
mean_err_5norm=np.zeros((end_time,1))
for k in range(0,end_time):
mean_err_wn[k]=np.mean(err_wn[:,k])
mean_err_2avrg[k]=np.mean(err_2avrg[:,k])
mean_err_2norm[k]=np.mean(err_2norm[:,k])
mean_err_3avrg[k]=np.mean(err_3avrg[:,k])
mean_err_3norm[k]=np.mean(err_3norm[:,k])
    mean_err_4avrg[k]=np.mean(err_4avrg[:,k])
    mean_err_4norm[k]=np.mean(err_4norm[:,k])
    mean_err_5avrg[k]=np.mean(err_5avrg[:,k])
    mean_err_5norm[k]=np.mean(err_5norm[:,k])
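# --- Illustrative note (not part of the original file) ---
# The element-wise loop above is equivalent to one vectorized call per array,
# e.g. for the first distance file:
#     mean_err_wn = np.mean(err_wn, axis=0).reshape(-1, 1)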
from typing import Any, Optional, Union
import numpy as np
class Batch:
def __init__(
self,
obss: np.ndarray,
acts: np.ndarray,
rews: np.ndarray,
dones: np.ndarray,
next_obss: np.ndarray,
        indexes: Optional[np.ndarray] = None,
weights: Optional[np.ndarray] = None,
):
self.obss = obss
self.acts = acts
self.rews = rews
self.dones = dones
self.next_obss = next_obss
self.indexes = indexes
self.weights = weights
class ReplayBuffer:
"""Experience replay
>>> buffer = ReplayBuffer(10, 2)
>>> for i in range(3):
... assert i == len(buffer)
... buffer.add(1, 2, 3, False, 4)
>>> for i in range(20):
... assert 2 == len(buffer)
>>> batch = buffer.sample()
>>> len(batch.obss) == len(batch.acts) == len(batch.rews) == len(batch.dones)
True
>>> len(batch.dones) == len(batch.next_obss) == len(batch.indexes)
True
"""
def __init__(self, buffer_size: int, batch_size: int):
assert buffer_size >= batch_size
self.buffer_size = buffer_size
self.batch_size = batch_size
self.obss: np.ndarray = None
self.acts: np.ndarray = None
self.rews: np.ndarray = None
self.dones: np.ndarray = None
self.next_obss: np.ndarray = None
        self.indexes = np.arange(buffer_size)
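    # --- Illustrative sketch (not part of the original file) ---
    # The doctest above exercises add() and sample(), which are cut off in
    # this excerpt. A minimal ring-buffer sketch consistent with that
    # doctest (storage layout assumed) might look like:
    #
    #     def add(self, obs, act, rew, done, next_obs):
    #         i = self._count % self.buffer_size   # overwrite the oldest slot
    #         ...store the transition at slot i, then self._count += 1...
    #
    #     def sample(self) -> Batch:
    #         idx = np.random.choice(len(self), self.batch_size)
    #         return Batch(self.obss[idx], self.acts[idx], self.rews[idx],
    #                      self.dones[idx], self.next_obss[idx], indexes=idx)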
# -*- coding: utf-8 -*-
#
# Authors: Swolf <<EMAIL>>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
raise ValueError("the current kernel is for 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
        the first k components to use, usually an even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
        n_components should not exceed the number of channels
References
----------
.. [1] <NAME>, <NAME>, <NAME>. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
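# --- Illustrative usage sketch (not part of the original file) ---
# An end-to-end sketch of the two helpers above on synthetic two-class data
# (shapes follow the docstrings); it assumes the package-relative imports at
# the top of this file resolve.
def _demo_csp():
    rng = np.random.RandomState(42)
    X = rng.randn(20, 8, 256)          # (n_trials, n_channels, n_samples)
    y = np.array([0] * 10 + [1] * 10)  # two classes
    X[y == 1, 0] *= 3.0                # inflate channel-0 variance in class 1
    W, D, A = csp_kernel(X, y)
    feats = csp_feature(W, X, n_components=2)
    print(feats.shape)                 # -> (20, 2)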
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
    This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
    JADE. The code is a translation of the matlab code provided on the
    author's website.
References
----------
.. [1] Cardoso, Jean-Francois, and <NAME>. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
                c = np.cos(theta)
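                # --- Illustrative sketch (not part of the original file) ---
                # The excerpt cuts off inside the Givens rotation. In
                # Cardoso's rjd (as implemented, e.g., in pyriemann's AJD
                # utilities) the update continues roughly as:
                #
                #     s = np.sin(theta)
                #     if np.abs(s) > eps:
                #         encore = True
                #         tmp = A[:, Ip].copy()
                #         A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
                #         A[:, Iq] = c * A[:, Iq] - s * tmp
                #         tmp = A[p, :].copy()
                #         A[p, :] = c * A[p, :] + s * A[q, :]
                #         A[q, :] = c * A[q, :] - s * tmp
                #         tmp = V[:, p].copy()
                #         V[:, p] = c * V[:, p] + s * V[:, q]
                #         V[:, q] = c * V[:, q] - s * tmp
                #
                # with V and the quasi-diagonal stack returned on convergence.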
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 11:24:01 2021
@author: ja17375
"""
import pygmt
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4 as nc
def plot_forte_gmt():
tx2008 = np.loadtxt('/Users/ja17375/SWSTomo/ForteModels/Flow_Models/TX2008/forteV2_1deg_150km.txt')
shp = (181, 361)
dg = 15
lat = tx2008[:,1].reshape(shp)
lon = tx2008[:,2].reshape(shp)
Ur = tx2008[:,3].reshape(shp)
Utheta = tx2008[:,4].reshape(shp)*-1 # theta is colat so invert
Uphi = tx2008[:,5].reshape(shp)
hzdeg = ((lat % dg == 0) & (lon % dg == 0))
# Cast Ur (radial velocity) into xarry for pyGMT
U_grid = xr.DataArray(data=np.flipud(Ur),
coords=[('latitude', np.linspace(-90,90,181),
{'units': 'degrees_north'}),
('longitude', np.linspace(-180,180,361),
{'units': 'degrees_east'})],
)
fig = pygmt.Figure()
africa_med = [-25,80,-5,60]
easia = [60,150,10,70]
epac = [-170, -80, 10, 65]
proj = "M15c"
gproj = "Ks12c"
    fig.basemap(region=africa_med, projection=proj, frame="afg",)
# Flow model TX2008
# pygmt.makecpt(cmap='roma', series=[-1.5, 1.5], reverse=True)
# fig.grdimage(grid=U_grid)
# fig.colorbar(frame=['a0.5', 'x+l"Vertical Velocity (cm/yr)"' ])
# S40RTS
fig.grdimage(grid='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS_2800km.grd',
cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
fig.colorbar(frame=['a0.5', 'x+l"dVs (%)"' ], cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
fig.coast(shorelines=True)
# flow_ang = np.rad2deg(np.arctan2(np.ravel(Utheta[hzdeg]), np.ravel(Uphi[hzdeg])))
# flow_len = np.sqrt(np.ravel(Utheta[hzdeg])**2 + np.ravel(Uphi[hzdeg])**2)
# flow_data = np.zeros((325, 4))
# flow_data[:,0] = lon[hzdeg]
# flow_data[:,1] = lat[hzdeg]
# flow_data[:,2] = flow_ang
# flow_data[:,3] = flow_len *0.5
# fig.plot(data=flow_data, style = 'v0.2c+e', color='black', pen='1p')
# flow_data[:,2] = flow_data[:,2] + 180
# fig.plot(data=flow_data, style = 'v0c', color='black', pen='1p')
fig.plot(x=130, y=20, direction = [[0], [1]], style = 'v0c', color='black', pen='1p')
data = pd.read_csv('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs', delim_whitespace=True)
for i, row in data.iterrows():
fig.plot(x=[row['SKS_PP_LON'], row['SKKS_PP_LON']],
y=[row['SKS_PP_LAT'], row['SKKS_PP_LAT']],
pen="1p,black")
if (row['Q_SKS'] >= 0.5):
#Plot split SKS - black circle
fig.plot(x=row['SKS_PP_LON'],
y=row['SKS_PP_LAT'],
style='c0.15c', color='black', pen='black')
vec = np.array([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS']*0.5],
[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS']+180, row['TLAG_SKS']*0.5]])
fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
elif (row['Q_SKS'] <= -0.5):
fig.plot(x=row['SKS_PP_LON'],
y=row['SKS_PP_LAT'],
style='c0.15c', color='white', pen='black')
else:
print('Bad Q for SKS')
if (row['Q_SKKS'] >= 0.5):
#Plot split SKKS - black circle
fig.plot(x=row['SKKS_PP_LON'],
y=row['SKKS_PP_LAT'],
style='d0.15c', color='black', pen='black')
vec = np.array([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS']*0.5],
[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS']+180, row['TLAG_SKKS']*0.5]])
fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
elif (row['Q_SKKS'] <= -0.5):
fig.plot(x=row['SKKS_PP_LON'],
y=row['SKKS_PP_LAT'],
style='d0.15c', color='white', pen='black')
fig.savefig('/Users/ja17375/Documents/Thesis-enclosing/Thesis/chapters/chapter02/Figs/Africa_Med_SKS_SKKS_onS40RTS.eps',
crop=True, show=True)
# fig.show(method='external')
def plot_flament(dpath='/Users/ja17375/SWSTomo/FlamentModel',extent='epac'):
nc_vx = nc.Dataset(f'{dpath}/C3-vx-000Ma-2677km.grd')
nc_vy = nc.Dataset(f'{dpath}/C3-vy-000Ma-2677km.grd')
nc_vz = nc.Dataset(f'{dpath}/C3-vz-000Ma-2677km.grd')
vel_conv = 4.9e-4 # converts velocity to cm/year (from N. Flament - see model README.txt)
Utheta = nc_vx['z'][:] * vel_conv *-1 #theta is colat so invert
Uphi = nc_vy['z'][:] * vel_conv # longitudl velocity
Ur = nc_vz['z'][:] * vel_conv # radial velocity
lon, lat = np.meshgrid(nc_vx['lon'][:], nc_vx['lat'][:])
dg = 15
hzdeg = ((lat % dg == 0) & (lon % dg == 0))
U_grid = xr.DataArray(data=np.flipud(Ur),
coords=[('latitude', np.linspace(-90,90,181),
{'units': 'degrees_north'}),
('longitude', np.linspace(-180,180,361),
{'units': 'degrees_east'})],
)
fig = pygmt.Figure()
africa_med = [25,70,-5,50]
fig.basemap(region=africa_med, projection="Ks12c", frame="afg",)
fig.grdimage(grid=U_grid)
fig.coast(shorelines=True)
    flow_ang = np.rad2deg(np.arctan2(np.ravel(Utheta[hzdeg]), np.ravel(Uphi[hzdeg])))
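    # --- Illustrative note (not part of the original file) ---
    # Both plotting functions feed vectors to fig.plot(data=..., style='v...')
    # as rows of [lon, lat, angle_deg, length], e.g. a single arrow built the
    # same way as the SKS vectors above:
    #     vec = np.array([[130.0, 20.0, 45.0, 0.5]])
    #     fig.plot(data=vec, style='v0.2c+e', color='black', pen='1p')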
"""Methods for geodetic calculations."""
import os
import numpy
import srtm
import geopy
from geopy.distance import GeodesicDistance
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
RADIANS_TO_DEGREES = 180. / numpy.pi
DEGREES_TO_RADIANS = numpy.pi / 180
MIN_LATITUDE_DEG = -90.
MAX_LATITUDE_DEG = 90.
MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG = -180.
MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG = 180.
MIN_LONGITUDE_POSITIVE_IN_WEST_DEG = 0.
MAX_LONGITUDE_POSITIVE_IN_WEST_DEG = 360.
POSITIVE_LONGITUDE_ARG = 'positive'
NEGATIVE_LONGITUDE_ARG = 'negative'
EITHER_SIGN_LONGITUDE_ARG = 'either'
VALID_LONGITUDE_SIGN_ARGS = [
POSITIVE_LONGITUDE_ARG, NEGATIVE_LONGITUDE_ARG, EITHER_SIGN_LONGITUDE_ARG]
class ElevationFileHandler:
"""File-handler for elevation data.
This class mimics the class `FileHandler` in main.py of the `srtm` package.
"""
working_dir_name = ''
def __init__(self, working_dir_name=None):
"""Creates new instance.
:param working_dir_name: Path to working directory. Elevation files
will be read from here and, if necessary, downloaded to here. If
`working_dir_name is None`, will try to create subdirectory
".cache/srtm" in the home directory.
:raises: ValueError: if `working_dir_name is None` and this method
cannot create ".cache/srtm" in the home directory.
"""
if working_dir_name is None:
if 'HOME' in os.environ:
top_working_dir_name = os.environ['HOME']
elif 'HOMEPATH' in os.environ:
top_working_dir_name = os.environ['HOMEPATH']
else:
raise ValueError('Cannot find home directory.')
working_dir_name = '{0:s}/.cache/srtm'.format(top_working_dir_name)
file_system_utils.mkdir_recursive_if_necessary(
directory_name=working_dir_name)
self.working_dir_name = working_dir_name
def get_srtm_dir(self):
"""Returns path to working directory.
:return: working_dir_name: See doc for `__init__`.
"""
return self.working_dir_name
def exists(self, file_name):
"""Returns flag, indicating whether or not a file exists.
:param file_name: Pathless file name.
:return: does_file_exist: Boolean flag.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
return os.path.isfile(full_file_name)
def write(self, file_name, contents):
"""Writes elevation file to working directory.
:param file_name: Pathless file name.
:param contents: Stuff to be written.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
with open(full_file_name, 'wb') as f:
f.write(contents)
def read(self, file_name):
"""Reads elevation file from working directory.
:param file_name: Pathless file name.
:return: contents: Stuff contained in file.
"""
full_file_name = '{0:s}/{1:s}'.format(self.get_srtm_dir(), file_name)
with open(full_file_name, 'rb') as f:
return f.read()
def _get_elevation(
latitude_deg, longitude_deg, srtm_data_object=None,
working_dir_name=None):
"""Gets elevation at a single point.
WARNING: Input longitudes in western hemisphere must be negative.
If `srtm_data_object is None`, it will be created on the fly.
:param latitude_deg: Latitude (deg N).
:param longitude_deg: Longitude (deg E).
:param srtm_data_object: Instance of `srtm.data.GeoElevationData`.
:param working_dir_name: See doc for `__init__` in class
`ElevationFileHandler`.
:return: elevation_m_asl: Elevation (metres above sea level).
:return: srtm_data_object: Instance of `srtm.data.GeoElevationData`.
"""
if srtm_data_object is None:
srtm_data_object = srtm.get_data(
file_handler=ElevationFileHandler(working_dir_name))
elevation_m_asl = srtm_data_object.get_elevation(
latitude=latitude_deg, longitude=longitude_deg)
# TODO(thunderhoser): I am concerned about this hack.
if elevation_m_asl is None:
elevation_m_asl = 0.
return elevation_m_asl, srtm_data_object
def find_invalid_latitudes(latitudes_deg):
"""Returns array indices of invalid latitudes.
:param latitudes_deg: 1-D numpy array of latitudes (deg N).
:return: invalid_indices: 1-D numpy array with array indices of invalid
latitudes.
"""
error_checking.assert_is_real_numpy_array(latitudes_deg)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
valid_flags = numpy.logical_and(
latitudes_deg >= MIN_LATITUDE_DEG, latitudes_deg <= MAX_LATITUDE_DEG)
return numpy.where(numpy.invert(valid_flags))[0]
def find_invalid_longitudes(
longitudes_deg, sign_in_western_hemisphere=POSITIVE_LONGITUDE_ARG):
"""Returns array indices of invalid longitudes.
:param longitudes_deg: 1-D numpy array of longitudes (deg E).
:param sign_in_western_hemisphere: Required sign in western hemisphere. May
be "positive", "negative", or "either".
:return: invalid_indices: 1-D numpy array with array indices of invalid
longitudes.
:raises: ValueError: if `sign_in_western_hemisphere` is not one of the 3
aforelisted options.
"""
error_checking.assert_is_real_numpy_array(longitudes_deg)
error_checking.assert_is_numpy_array(longitudes_deg, num_dimensions=1)
error_checking.assert_is_string(sign_in_western_hemisphere)
if sign_in_western_hemisphere == POSITIVE_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_POSITIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)
elif sign_in_western_hemisphere == NEGATIVE_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_NEGATIVE_IN_WEST_DEG)
elif sign_in_western_hemisphere == EITHER_SIGN_LONGITUDE_ARG:
valid_flags = numpy.logical_and(
longitudes_deg >= MIN_LONGITUDE_NEGATIVE_IN_WEST_DEG,
longitudes_deg <= MAX_LONGITUDE_POSITIVE_IN_WEST_DEG)
else:
error_string = (
'\n\n{0:s}Valid options for `sign_in_western_hemisphere` are listed'
' above and do not include "{1:s}".'
).format(str(VALID_LONGITUDE_SIGN_ARGS), sign_in_western_hemisphere)
raise ValueError(error_string)
return numpy.where(numpy.invert(valid_flags))[0]
def get_latlng_centroid(latitudes_deg, longitudes_deg, allow_nan=True):
"""Finds centroid of lat-long points.
P = number of points
:param latitudes_deg: length-P numpy array of latitudes (deg N).
:param longitudes_deg: length-P numpy array of longitudes (deg E).
:param allow_nan: Boolean flag. If True, input arrays may contain NaN's
(however, NaN's must occur at the exact same positions in the two
arrays).
:return: centroid_lat_deg: Centroid latitude (deg N).
:return: centroid_lng_deg: Centroid longitude (deg E).
:raises: ValueError: if allow_nan = True but NaN's do not occur at the same
positions in the two arrays.
"""
error_checking.assert_is_boolean(allow_nan)
error_checking.assert_is_valid_lat_numpy_array(latitudes_deg, allow_nan)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
num_points = len(latitudes_deg)
longitudes_deg = lng_conversion.convert_lng_positive_in_west(
longitudes_deg, allow_nan)
error_checking.assert_is_numpy_array(
longitudes_deg, exact_dimensions=numpy.array([num_points]))
nan_latitude_indices = numpy.where(numpy.isnan(latitudes_deg))[0]
nan_longitude_indices = numpy.where(numpy.isnan(longitudes_deg))[0]
if not numpy.array_equal(nan_latitude_indices, nan_longitude_indices):
error_string = (
'\nNaN''s occur at the following positions in `latitudes_deg`:\n' +
str(nan_latitude_indices) +
'\nand the following positions in `longitudes_deg`:\n' +
str(nan_longitude_indices) +
'\nNaN''s should occur at the same positions in the two arrays.')
raise ValueError(error_string)
return numpy.nanmean(latitudes_deg), numpy.nanmean(longitudes_deg)
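# --- Illustrative note (not part of the original file) ---
# Example: centroid of two points straddling a NaN entry.
#     lats = numpy.array([10., numpy.nan, 20.])
#     lngs = numpy.array([30., numpy.nan, 50.])
#     get_latlng_centroid(lats, lngs)  # -> (15.0, 40.0)
# since nanmean skips the NaN pair, which sits at the same index in both arrays.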
def get_elevations(latitudes_deg, longitudes_deg, working_dir_name=None):
"""Returns elevation of each point.
N = number of points
:param latitudes_deg: length-N numpy array of latitudes (deg N).
:param longitudes_deg: length-N numpy array of longitudes (deg E).
:param working_dir_name: See doc for `__init__` in class
`ElevationFileHandler`.
:return: elevations_m_asl: length-N numpy array of elevations (metres above
sea level).
"""
error_checking.assert_is_valid_lat_numpy_array(latitudes_deg)
error_checking.assert_is_numpy_array(latitudes_deg, num_dimensions=1)
num_points = len(latitudes_deg)
longitudes_deg = lng_conversion.convert_lng_negative_in_west(
longitudes_deg, allow_nan=False)
error_checking.assert_is_numpy_array(
longitudes_deg, exact_dimensions=numpy.array([num_points]))
srtm_data_object = None
elevations_m_asl = numpy.full(num_points, numpy.nan)
for i in range(num_points):
elevations_m_asl[i], srtm_data_object = _get_elevation(
latitude_deg=latitudes_deg[i], longitude_deg=longitudes_deg[i],
srtm_data_object=srtm_data_object,
working_dir_name=working_dir_name)
return elevations_m_asl
def start_points_and_displacements_to_endpoints(
start_latitudes_deg, start_longitudes_deg, scalar_displacements_metres,
geodetic_bearings_deg):
"""Computes endpoint from each start point and displacement.
:param start_latitudes_deg: numpy array with latitudes (deg N) of start
points.
:param start_longitudes_deg: equivalent-size numpy array with longitudes
(deg E) of start points.
:param scalar_displacements_metres: equivalent-size numpy array of scalar
displacements.
:param geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
:return: end_latitudes_deg: equivalent-size numpy array with latitudes
(deg N) of endpoints.
:return: end_longitudes_deg: equivalent-size numpy array with longitudes
(deg E) of endpoints.
"""
error_checking.assert_is_valid_lat_numpy_array(
start_latitudes_deg, allow_nan=False)
start_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
start_longitudes_deg, allow_nan=False)
error_checking.assert_is_numpy_array(
start_longitudes_deg,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
error_checking.assert_is_geq_numpy_array(scalar_displacements_metres, 0.)
error_checking.assert_is_numpy_array(
scalar_displacements_metres,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
error_checking.assert_is_geq_numpy_array(geodetic_bearings_deg, 0.)
error_checking.assert_is_leq_numpy_array(geodetic_bearings_deg, 360.)
error_checking.assert_is_numpy_array(
geodetic_bearings_deg,
exact_dimensions=numpy.array(start_latitudes_deg.shape))
end_latitudes_deg = numpy.full(start_latitudes_deg.shape, numpy.nan)
end_longitudes_deg = numpy.full(start_latitudes_deg.shape, numpy.nan)
num_points = start_latitudes_deg.size
for i in range(num_points):
this_start_point_object = geopy.Point(
start_latitudes_deg.flat[i], start_longitudes_deg.flat[i])
this_end_point_object = GeodesicDistance(
meters=scalar_displacements_metres.flat[i]).destination(
this_start_point_object, geodetic_bearings_deg.flat[i])
end_latitudes_deg.flat[i] = this_end_point_object.latitude
end_longitudes_deg.flat[i] = this_end_point_object.longitude
end_longitudes_deg = lng_conversion.convert_lng_positive_in_west(
end_longitudes_deg, allow_nan=False)
return end_latitudes_deg, end_longitudes_deg
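# --- Illustrative note (not part of the original file) ---
# Example (values approximate): starting at (40 N, 255 E) and moving 100 km at
# bearing 90 deg (due east) lands near (39.99 N, 256.17 E) on the ellipsoid,
# since one degree of longitude spans only ~85 km at that latitude:
#     start_points_and_displacements_to_endpoints(
#         numpy.array([40.]), numpy.array([255.]),
#         numpy.array([100000.]), numpy.array([90.]))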
def xy_to_scalar_displacements_and_bearings(
x_displacements_metres, y_displacements_metres):
"""For each displacement vector, converts x-y to magnitude and direction.
:param x_displacements_metres: numpy array of eastward displacements.
:param y_displacements_metres: equivalent-size numpy array of northward
displacements.
:return: scalar_displacements_metres: equivalent-size numpy array of total
displacements.
:return: geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
"""
error_checking.assert_is_numpy_array_without_nan(x_displacements_metres)
error_checking.assert_is_numpy_array_without_nan(y_displacements_metres)
    error_checking.assert_is_numpy_array(
        y_displacements_metres,
        exact_dimensions=numpy.array(x_displacements_metres.shape))
scalar_displacements_metres = numpy.sqrt(
x_displacements_metres ** 2 + y_displacements_metres ** 2)
standard_bearings_deg = RADIANS_TO_DEGREES * numpy.arctan2(
y_displacements_metres, x_displacements_metres)
return scalar_displacements_metres, standard_to_geodetic_angles(
standard_bearings_deg)
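# --- Illustrative note (not part of the original file) ---
# Worked example: x = 3 m east, y = 4 m north gives a 5-m displacement; the
# standard angle is arctan2(4, 3) ~= 53.13 deg, so the geodetic bearing is
# 90 - 53.13 ~= 36.87 deg, i.e. measured clockwise from due north.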
def scalar_displacements_and_bearings_to_xy(
scalar_displacements_metres, geodetic_bearings_deg):
"""For each displacement vector, converts magnitude and direction to x-y.
:param scalar_displacements_metres: numpy array of total displacements.
:param geodetic_bearings_deg: equivalent-size numpy array of geodetic
bearings (from start point to end point, measured clockwise from due
north).
:return: x_displacements_metres: equivalent-size numpy array of eastward
displacements.
:return: y_displacements_metres: equivalent-size numpy array of northward
displacements.
"""
error_checking.assert_is_geq_numpy_array(scalar_displacements_metres, 0.)
error_checking.assert_is_geq_numpy_array(geodetic_bearings_deg, 0.)
error_checking.assert_is_leq_numpy_array(geodetic_bearings_deg, 360.)
error_checking.assert_is_numpy_array(
geodetic_bearings_deg,
exact_dimensions=numpy.array(scalar_displacements_metres.shape))
standard_angles_radians = DEGREES_TO_RADIANS * geodetic_to_standard_angles(
geodetic_bearings_deg)
return (scalar_displacements_metres * numpy.cos(standard_angles_radians),
scalar_displacements_metres * numpy.sin(standard_angles_radians))
def rotate_displacement_vectors(
x_displacements_metres, y_displacements_metres, ccw_rotation_angle_deg):
"""Rotates each displacement vector by a certain angle.
:param x_displacements_metres: numpy array of eastward displacements.
:param y_displacements_metres: equivalent-size numpy array of northward
displacements.
:param ccw_rotation_angle_deg: Rotation angle (degrees). Each displacement
vector will be rotated counterclockwise by this amount.
:return: x_prime_displacements_metres: equivalent-size numpy array of
"eastward" displacements (in the rotated coordinate system).
:return: y_prime_displacements_metres: equivalent-size numpy array of
"northward" displacements (in the rotated coordinate system).
"""
error_checking.assert_is_numpy_array_without_nan(x_displacements_metres)
error_checking.assert_is_numpy_array_without_nan(y_displacements_metres)
    error_checking.assert_is_numpy_array(
        y_displacements_metres,
        exact_dimensions=numpy.array(x_displacements_metres.shape))
error_checking.assert_is_greater(ccw_rotation_angle_deg, -360.)
error_checking.assert_is_less_than(ccw_rotation_angle_deg, 360.)
ccw_rotation_angle_rad = DEGREES_TO_RADIANS * ccw_rotation_angle_deg
rotation_matrix = numpy.array([
        [numpy.cos(ccw_rotation_angle_rad), -numpy.sin(ccw_rotation_angle_rad)],
        [numpy.sin(ccw_rotation_angle_rad), numpy.cos(ccw_rotation_angle_rad)]
    ])  # standard 2-D counterclockwise rotation matrix
#!/usr/bin/env python
"""
@package ion_functions.data.prs_functions
@file ion_functions/data/prs_functions.py
@author <NAME>, <NAME>
@brief Module containing calculations related to instruments in the Seafloor
Pressure family.
"""
import numexpr as ne
import numpy as np
import scipy as sp
from scipy import io, signal  # importing scipy.io makes sp.io.loadmat (used below) available
"""
Listing of functions, in order encountered.
Functions calculating data products.
BOTTILT:
prs_bottilt_ccmp -- computes the BOTTILT-CCMP_L1 data product
prs_bottilt_tmag -- computes the BOTTILT-TMAG_L1 data product
prs_bottilt_tdir -- computes the BOTTILT-TDIR_L1 data product
BOTSFLU:
prs_botsflu_time15s -- computes the TIME15S-AUX auxiliary data product
prs_botsflu_meanpres -- computes the BOTSFLU-MEANPRES_L2 data product
prs_botsflu_predtide -- computes the BOTSFLU-PREDTIDE_L2 data product
prs_botsflu_meandepth -- computes the BOTSFLU-MEANDEPTH_L2 data product
prs_botsflu_5minrate -- computes the BOTSFLU-5MINRATE_L2 data product
prs_botsflu_10minrate -- computes the BOTSFLU-10MINRATE_L2 data product
prs_botsflu_time24h -- computes the TIME24H-AUX auxiliary data product
prs_botsflu_daydepth -- computes the BOTSFLU-DAYDEPTH_L2 data product
prs_botsflu_4wkrate -- computes the BOTSFLU-4WKRATE_L2 data product
prs_botsflu_8wkrate -- computes the BOTSFLU-8WKRATE_L2 data product
Functions calculating event notifications; they return either True or False.
BOTSFLU:
prs_tsunami_detection -- event notification specified by DPS
prs_eruption_imminent -- event notification specified by DPS
prs_eruption_occurred -- event notification specified by DPS
Worker functions called by functions calculating data products.
BOTSFLU
anchor_bin
calc_daydepth_plus
calc_meandepth_plus
calculate_sliding_means
calculate_sliding_slopes
"""
def prs_bottilt_ccmp(scmp, sn):
"""
Description:
OOI Level 1 Seafloor High-Resolution tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-CCMP_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Alternate code: faster, but less direct.
Usage:
ccmp = prs_bottilt_ccmp(scmp, sn)
where
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
scmp = Uncorrected sensor compass direction (BOTTILT-SCMP_L0) [degrees]
sn = LILY sensor serial number [unitless]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
"""
Currently, there are two coded algorithms:
(1) the straightforward original, which uses a two-element keyed dictionary;
(2) a faster version, which uses serial number keys to the dictionary.
Since each algorithm uses its own dictionary, the corresponding import statements
are TEMPORARILY placed at the beginning of their respective code sections
instead of at module top.
"""
### Original coding, using a dictionary constructed with 2-element keys.
# load the corrected compass directions table [(sn, scmp) keys]
from ion_functions.data.prs_functions_ccmp import cmp_lookup
# use the lookup table to get the ccmp
ccmp = np.zeros(len(scmp))
for i in range(len(scmp)):
ccmp[i] = cmp_lookup[(sn[i], int(round(scmp[i])))]
return ccmp
#### Faster coding, using a dictionary constructed with 1-element keys.
#
## load the corrected compass directions table [sn keys]
#from ion_functions.data.prs_functions_ccmp_lily_compass_cals import cmp_cal
#
## initialize output array for vectorized masking operations. this will 'break'
## the code if an invalid serial number is specified in the argument list.
#ccmp = np.zeros(len(scmp)) + np.nan
#
## round the uncorrected compass values to the nearest integer as specified in the DPS,
## which uses a lookup table consisting of integral values to do the correction.
#scmp = np.round(scmp)
#
## find the supported tilt sensor serial numbers, which are keys in the dictionary
#sernum = cmp_cal.keys()
#
#for ii in range(len(sernum)):
# # get the cal coeffs as a function of the iterated serial number;
# # x is the raw, uncorrected reading (scmp)
# # y is the corrected reading (ccmp)
# [x, y] = cmp_cal[sernum[ii]]
#
# # the boolean mask has 'true' entries where the elements of input vector sn
# # agree with the iterated serial number.
# # np.core.defchararray.equal handles vector string comparisons.
# mask = np.core.defchararray.equal(sn, sernum[ii])
#
# ## np.interp is used to do the 'lookup' for performance reasons (vectorized)
# ccmp[mask] = np.interp(scmp[mask], x, y)
#
## round to make sure we get an integral value (but not int type)
#return np.round(ccmp)
def prs_bottilt_tmag(x_tilt, y_tilt):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TMAG_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
Usage:
tmag = prs_bottilt(x_tilt, y_tilt)
where
tmag = Resultant tilt magnitude (BOTTILT-TMAG_L1) [microradians]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
tmag = ne.evaluate('sqrt(x_tilt**2 + y_tilt**2)')
return tmag
def prs_bottilt_tdir(x_tilt, y_tilt, ccmp):
"""
Description:
OOI Level 1 Seafloor High-Resolution Tilt (BOTTILT) core data product,
derived from data output by the Applied Geomechanics LILY tilt sensor
on board the Bottom Pressure Tilt (BOTPT) instruments on the Regional
Scale Nodes (RSN) at Axial Seamount. This function computes
BOTTILT-TDIR_L1.
Implemented by:
2013-06-10: <NAME>. Initial code.
2014-03-20: <NAME>. Replaced initial code with arctan2 implementation.
Usage:
tdir = prs_bottilt(x_tilt, y_tilt, ccmp)
where
tdir = Resultant tilt direction (BOTTILT-TDIR_L1) [degrees]
x_tilt = Sensor X_tilt (BOTTILT-XTLT_L0) [microradians]
y_tilt = Sensor Y_tilt (BOTTILT-YTLT_L0) [microradians]
ccmp = Corrected compass direction (BOTTILT-CCMP_L1) [degrees]
References:
OOI (2013). Data Product Specification for Seafloor High-Resolution
Tilt. Document Control Number 1341-00060.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00060_Data_Product_SPEC_BOTTILT_OOI.pdf)
"""
### As originally coded, according to the algorithm specified in the DPS:
## Calculate the angle to use in the tilt direction formula
## default angle calculation -- in degrees
#angle = ne.evaluate('arctan(y_tilt / x_tilt)')
#angle = np.degrees(angle)
#
## if X-Tilt == 0 and Y-Tilt > 0
#mask = np.logical_and(x_tilt == 0, y_tilt > 0)
#angle[mask] = 90.0
#
## if X-Tilt == 0 and Y-Tilt < 0
#mask = np.logical_and(x_tilt == 0, y_tilt < 0)
#angle[mask] = -90.0
#
## if Y-Tilt == 0
#mask = np.equal(y_tilt, np.zeros(len(y_tilt)))
#angle[mask] = 0.0
#
### Calculate the tilt direction, using the X-Tilt to set the equation
## default tilt direction equation
#tdir = ne.evaluate('(270 - angle + ccmp) % 360')
#
## if X-Tilt >= 0
#tmp = ne.evaluate('(90 - angle + ccmp) % 360')
#mask = np.greater_equal(x_tilt, np.zeros(len(x_tilt)))
#tdir[mask] = tmp[mask]
#
#return np.round(tdir)
# The preceding calculation is faster and simpler if the arctan2 function is used.
# Use 450 as an addend in the first argument to the mod function to make sure the result is positive.
return np.round(np.mod(450 - np.degrees(np.arctan2(y_tilt, x_tilt)) + ccmp, 360))
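# --- Illustrative note (not part of the original file) ---
# Worked example for the arctan2 form above: x_tilt = 1, y_tilt = 1, ccmp = 0
# gives mod(450 - degrees(arctan2(1, 1)) + 0, 360) = mod(450 - 45, 360) = 45,
# i.e. a tilt toward the sensor's northeast before compass correction.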
def prs_botsflu_time15s(timestamp):
"""
Description:
Calculates the auxiliary BOTSFLU data product TIME15S-AUX. These are timestamps
anchored at multiples of 15 seconds past the minute which correspond to the time
base for the BOTSFLU data products which are binned on 15 seconds.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
time15s = prs_botsflu_time15s(timestamp)
where
time15s = BOTSFLU-TIME15S-AUX [sec since 01-01-1900]
timestamp = OOI system timestamps [sec since 01-01-1900]
Notes:
The BOTSFLU data products associated with this timebase are:
MEANPRES
PREDTIDE
MEANDEPTH
5MINRATE
10MINRATE
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
# the second calling argument is a placeholder
time15s = anchor_bin(timestamp, None, bin_duration, 'time')
return time15s
def prs_botsflu_meanpres(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product MEANPRES_L1.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
meanpres = prs_botsflu_meanpres(timestamp, botpres)
where
meanpres = BOTSFLU-MEANPRES_L2 [psi]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 15.0 # seconds
meanpres, _ = anchor_bin(timestamp, botpres, bin_duration, 'data')
return meanpres
def prs_botsflu_predtide(time):
"""
Description:
Assigns tide values for the 3 BOTPT instrument sites about 500 km west of Astoria.
When the input argument is the data product TIME15S, the output of this function
will be the BOTSFLU data product PREDTIDE.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage:
PREDTIDE = prs_botsflu_predtide(TIME15S)
where
PREDTIDE = BOTSFLU-PREDTIDE data product [m]
TIME15S = BOTSFLU-TIME15S data product [sec since 01-01-1900].
Notes:
Lookup table in binary file: 'ion_functions/data/prs_functions_tides_2014_thru_2019.mat'
The lookup table contains tide values every 15 seconds from 2014-01-01 to 2020-01-01
at lat = 45.95547 lon = -130.00957 calculated by the Tide Model Driver software
written in Matlab (Mathworks, Natick, MA) using the TPXO7.2 global model. The tides
corresponding to time are determined by positional indexing (the first value is for
2014-01-01 00:00:00, the second is for 2014-01-01 00:00:15, etc). The 3 BOTPT sites
are close enough together that the caldera center location can be used for all, as
above: lat = 45.95547 lon = -130.00957.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
Matlab code to calculate tides using TPXO7.2 global model:
http://polaris.esr.org/ptm_index.html
Further documentation for the TPXO7.2 global tide model:
http://volkov.oce.orst.edu/tides/global.html
"""
time0 = 3597523200.0 # midnight, 2014-01-01
time_interval = 15.0 # seconds
    # for unit-test data only (Feb-Apr 2011)
if time[0] < time0:
time0 = 3502828800.0 # midnight, 2011-01-01
# tide values are signed 4 byte integers, units [0.001mm]
matpath = 'ion_functions/data/matlab_scripts/botpt/'
dict_tides = sp.io.loadmat(matpath + 'tides_15sec_2011_for_unit_tests.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
# else, OOI data from 2014 onwards
# tide values are signed 4 byte integers, units [0.001mm]
dict_tides = sp.io.loadmat('ion_functions/data/prs_functions_tides_2014_thru_2019.mat')
tidevector = 0.000001 * dict_tides['tides_mat']
tidevector = tidevector.reshape((-1))
# calculate tide vector index as a function of timestamp
idx = np.around((time - time0)/time_interval)
tide = tidevector[idx.astype(int)]
return tide
def prs_botsflu_meandepth(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product MEANDEPTH_L2, de-tided bottom depth
as a function of time (15sec bins).
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
meandepth = prs_botsflu_meandepth(timestamp, botpres)
where
meandepth = BOTSFLU-MEANDEPTH_L2 [m]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
The DPS specifies that atmospheric pressure not be subtracted from the
L1 pressure data even though its units are [psia].
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
_, meandepth, _ = calc_meandepth_plus(timestamp, botpres)
return meandepth
def prs_botsflu_5minrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 5MINRATE_L2, the instantaneous rate of
depth change using 5 minute backwards-looking meandepth data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_5minrate = prs_botsflu_5minrate(timestamp, botpres)
where
botsflu_5minrate = BOTSFLU-5MINRATE_L2 [cm/min]
timestamp = CI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate de-tided depth and the positions of non-zero bins in the original data.
_, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
# initialize data product including elements representing data gap positions
botsflu_5minrate = np.zeros(mask_nonzero.size) + np.nan
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = np.copy(botsflu_5minrate)
data_w_gaps[mask_nonzero] = meandepth
# for 15s binned data, 5 minutes comes out to (5 minutes)/(0.25 min) = 20 intervals
shift = 20
# units of the subtraction are meter/5min; to convert to cm/min,
# multiply by 100cm/m and divide by 5 = 20.
botsflu_5minrate[shift:] = 20.0 * (data_w_gaps[shift:] - data_w_gaps[:-shift])
# this rate product now has potentially two sources of nans;
# definitely those at the start of the data record, and any that might
# have been propagated into the calculation because of the presence of
# data gaps. remove those only at the data dropout positions (if present)
# so that this data product will have a 1:1 correspondence with
# its associated timestamp variable (TIME15S).
botsflu_5minrate = botsflu_5minrate[mask_nonzero]
return botsflu_5minrate
def prs_botsflu_10minrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 10MINRATE_L2, the mean seafloor uplift rate
calculated using 10 minute backwards-looking 10 minute running mean depth data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_10minrate = prs_botsflu_10minrate(timestamp, botpres)
where
botsflu_10minrate = BOTSFLU-10MINRATE_L2 [cm/hr]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME15S.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate de-tided depth and the positions of non-zero bins in the original data.
_, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
# initialize data product including elements representing data gap positions
botsflu_10minrate = np.zeros(mask_nonzero.size) + np.nan
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = np.copy(botsflu_10minrate)
data_w_gaps[mask_nonzero] = meandepth
# now calculate sliding 10 minute means.
# the mean of the 1st 40 values will be located at timestamp position 20
# (python index 19).
window_size = 40 # 10min averages on 0.25min binned data
means = calculate_sliding_means(data_w_gaps, window_size)
# as above, 10 minutes = 40 intervals for 15sec binned data.
shift = 40
# units of the subtraction are meter/10min; to convert to cm/hr,
# multiply by 100cm/m and multiply by 6 = 600.
botsflu_10minrate[shift:] = 600.0 * (means[shift:] - means[:-shift])
# this rate product now has potentially two sources of nans;
# definitely those at the start of the data record, and any that might
# have been propagated into the calculation because of the presence of
# data gaps. remove those only at the data dropout positions (if present)
# so that this data product will have a 1:1 correspondence with
# its associated timestamp variable (TIME15S).
botsflu_10minrate = botsflu_10minrate[mask_nonzero]
return botsflu_10minrate
def prs_botsflu_time24h(time15s):
"""
Description:
Calculates the auxiliary BOTSFLU data product TIME24H-AUX. These are
timestamps anchored at midnight which correspond to the time base for
the BOTSFLU data products which are binned on a day's worth of data.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
time24h = prs_botsflu_time24h(time15s)
where
time24h = BOTSFLU-TIME24H-AUX [sec since 01-01-1900]
time15s = BOTSFLU-TIME15S-AUX [sec since 01-01-1900]
Notes:
The BOTSFLU data products associated with this timebase are:
DAYDEPTH
4WKRATE
8WKRATE
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
bin_duration = 86400.0 # seconds in a bin
# the second calling argument is a placeholder
time24h = anchor_bin(time15s, None, bin_duration, 'time')
return time24h
def prs_botsflu_daydepth(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product DAYDEPTH_L2, de-tided bottom depth
as a function of time (1 day bins).
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
daydepth = prs_botsflu_daydepth(timestamp, botpres)
where
daydepth = BOTSFLU-DAYDEPTH_L2 [m]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME24H.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
daydepth, _ = calc_daydepth_plus(timestamp, botpres)
return daydepth
def prs_botsflu_4wkrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 4WKRATE_L2, the mean rate of seafloor
change as calculated by 4-week backwards-looking linear regressions.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_4wkrate = prs_botsflu_4wkrate(timestamp, botpres)
where
botsflu_4wkrate = BOTSFLU-4WKRATE_L2 [cm/yr]
timestamp = CI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME24H.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate daydepth and the mask of nonzero data bins.
daydepth, mask_nonzero = calc_daydepth_plus(timestamp, botpres)
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = np.zeros(mask_nonzero.size) + np.nan
data_w_gaps[mask_nonzero] = daydepth
# 4 weeks of data
window_size = 29
botsflu_4wkrate = calculate_sliding_slopes(data_w_gaps, window_size)
# (1) remove appropriate bins to re-establish the 1:1 correspondence
# to TIME24H timestamps;
# (2) convert units:
# the units of the slopes are [y]/[x] = meters/day;
# to get units of cm/yr, multiply by 100cm/m * 365 days/yr
botsflu_4wkrate = 100.0 * 365.0 * botsflu_4wkrate[mask_nonzero]
return botsflu_4wkrate
def prs_botsflu_8wkrate(timestamp, botpres):
"""
Description:
Calculates the BOTSFLU data product 8WKRATE_L2, the mean rate of seafloor
change as calculated by 8-week backwards-looking linear regressions.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
botsflu_8wkrate = prs_botsflu_8wkrate(timestamp, botpres)
where
botsflu_8wkrate = BOTSFLU-8WKRATE_L2 [cm/yr]
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The timebase data product associated with this data product is TIME24H.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate daydepth and the mask of nonzero data bins.
daydepth, mask_nonzero = calc_daydepth_plus(timestamp, botpres)
# re-constitute the original data, with data gaps represented by nans.
data_w_gaps = np.zeros(mask_nonzero.size) + np.nan
data_w_gaps[mask_nonzero] = daydepth
# 8 weeks of data
window_size = 57
botsflu_8wkrate = calculate_sliding_slopes(data_w_gaps, window_size)
# (1) remove appropriate bins to re-establish the 1:1 correspondence
# to TIME24H timestamps;
# (2) convert units:
# the units of the slopes are [y]/[x] = meters/day;
# to get units of cm/yr, multiply by 100cm/m * 365 days/yr
botsflu_8wkrate = 100.0 * 365.0 * botsflu_8wkrate[mask_nonzero]
return botsflu_8wkrate
#**********************************************************************
#.. EVENT NOTIFICATION: tsunami detection
#**********************************************************************
def prs_tsunami_detection(botsflu_5minrate, tsunami_detection_threshold=1.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_tsunami_detection(BOTSFLU-5MINRATE_L2)
where
TF = True or False; whether a tsunami event has been detected.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/min]
boolean_tsunami_detection = False
# get rid of runtime warnings if nans are present
botsflu_5minrate[np.isnan(botsflu_5minrate)] = 0.0
if np.any(np.abs(botsflu_5minrate) >= tsunami_detection_threshold):
boolean_tsunami_detection = True
return boolean_tsunami_detection
#**********************************************************************
#.. EVENT NOTIFICATION: eruption imminent
#**********************************************************************
def prs_eruption_imminent(botsflu_10minrate, eruption_imminent_threshold=5.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_eruption_imminent(BOTSFLU-10MINRATE_L2)
where
TF = True or False; whether an eruption event is imminent.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/hr]
boolean_eruption_imminent = False
# get rid of runtime warnings if nans are present
botsflu_10minrate[np.isnan(botsflu_10minrate)] = 0.0
if np.any(botsflu_10minrate >= eruption_imminent_threshold):
boolean_eruption_imminent = True
return boolean_eruption_imminent
#**********************************************************************
#.. EVENT NOTIFICATION: eruption occurred
#**********************************************************************
def prs_eruption_occurred(botsflu_10minrate, eruption_occurred_threshold=-5.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_eruption_occurred(BOTSFLU-10MINRATE_L2)
where
TF = True or False; whether an eruption event has occurred.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/hr]
boolean_eruption_occurred = False
# get rid of runtime warnings if nans are present
botsflu_10minrate[np.isnan(botsflu_10minrate)] = 0.0
if np.any(botsflu_10minrate <= eruption_occurred_threshold):
boolean_eruption_occurred = True
return boolean_eruption_occurred
def anchor_bin(time, data, bin_duration, mode):
"""
Description:
Calculates 'anchored' timestamps (see Notes) and binned data based on timestamps
in units of seconds since midnight. Written explicitly for the BOTSFLU DPA which
requires two stages of binning: 20hz data on 15 seconds, then the 15sec data on 24 hours.
Implemented by:
2015-01-13: <NAME>. Initial code.
2015-01-14: <NAME>. Changed output arguments and incorporated conditionals
to improve program efficiency.
Usage (1):
bin_timestamps = anchor_bin(time, None, bin_duration, 'time')
where
bin_timestamps = 1D array of centered timestamps for non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
None = not used; python placeholder object
bin_duration = size of bin [s]
mode = the string 'time'
Usage (2):
binned_data, mask_nonzero = anchor_bin(time, data, bin_duration, 'data')
where
binned_data = 1D array of binned data; no empty bins are represented
mask_nonzero = boolean where True values represent locations of non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
data = data to be binned
bin_duration = size of bin [s]
mode = the string 'data'
Usage (3):
bin_timestamps, binned_data, mask_nonzero = anchor_bin(time, data, bin_duration, 'both')
where
bin_timestamps = 1D array of centered timestamps for non-empty bins
binned_data = 1D array of binned data; no empty bins are represented
mask_nonzero = boolean where True values represent locations of non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
data = data to be binned
bin_duration = size of bin [s]
mode = the string 'both'
Notes:
The conditional construction is used so that only necessary statements are executed;
when multiple years' worth of 20 Hz data is operated on, each np.bincount operation
may take multiple tens of seconds to execute.
The np.bincount routine is used in the same way accumarray in matlab is used
to bin data. The key to the routine is to convert the timestamps into elapsed
time in units of bin_duration and to construct bins based on the floored
bin_duration times. The summing is then carried out by using the weighting
feature of the np.bincount function, as described in the example in the
numpy.bincount documentation as listed in the References.
The BOTSFLU data products require binning at two stages. Bin results both with
and without empty bins are required. The output arguments have been selected to
provide this flexibility (in particular mask_nonzero).
This routine has been constructed to supply 'anchored' timestamps. For example,
if the bin_duration is 86400 (the number of seconds in a day) then the start time
will be half a bin earlier than the first day of data (at noon) and all timestamps
will be 'anchored' at midnight. Similarly, if the bin_duration is 15 sec, all
timestamps will be at 00, 15, 30, and 45 seconds past the minute.
References:
http://docs.scipy.org/doc/numpy-1.8.1/reference/generated/numpy.bincount.html.
"""
half_bin = bin_duration/2.0
# anchor time-centered bins by determining the start time to be half a bin
# before the first 'anchor timestamp', which will be an integral number of
# bin_durations after midnight.
start_time = np.floor((time[0] - half_bin)/bin_duration) * bin_duration + half_bin
# calculate elapsed time from start in units of bin_duration.
time_elapsed = (time - start_time)/bin_duration
# assign each timestamp a bin number index based on its elapsed time.
bin_number = np.floor(time_elapsed).astype(int)
# the number of elements in each bin is given by
bin_count = np.bincount(bin_number).astype(float)
# create a logical mask of non-zero bin_count values
mask_nonzero = (bin_count != 0)
# to calculate timestamps and to get tides, without also binning data.
# mask_nonzero is not needed.
if mode == 'time':
# directly calculate bin timestamp, units of [sec]:
# the midpoint of the data interval is used.
bin_timestamps = start_time + half_bin + bin_duration * np.arange(bin_count.size)
# keep only the bins with values
bin_timestamps = bin_timestamps[mask_nonzero]
return bin_timestamps
# for binning data when the resultant timestamps are not explicitly required.
# daydepth_plus also requires mask_nonzero for downstream products 4wkrate and 8wkrate.
elif mode == 'data':
# sum the values in each time bin, and put into the variable binned_data
binned_data = np.bincount(bin_number, data)
# divide the values in non-empty bins by the number of values in each bin
binned_data = binned_data[mask_nonzero]/bin_count[mask_nonzero]
return binned_data, mask_nonzero
# for when both timestamps and binned data are required.
elif mode == 'both':
bin_timestamps = start_time + half_bin + bin_duration * np.arange(bin_count.size)
bin_timestamps = bin_timestamps[mask_nonzero]
binned_data = np.bincount(bin_number, data)
binned_data = binned_data[mask_nonzero]/bin_count[mask_nonzero]
return bin_timestamps, binned_data, mask_nonzero
def calc_daydepth_plus(timestamp, botpres):
"""
Description:
Worker function to calculate the botsflu data product daydepth plus an
additional boolean mask required to calculate other botsflu data products
downstream from daydepth.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
daydepth, mask_nonzero = calc_daydepth_plus(timestamp, botpres)
where
daydepth = BOTSFLU-DAYDEPTH_L2 [m]
mask_nonzero = boolean of positions of non-empty 24 hr bins
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate 15sec bin timestamps and de-tided depth.
time15s, meandepth, _ = calc_meandepth_plus(timestamp, botpres)
# bin the 15sec data into 24 hour bins so that the timestamps are at midnight.
# to calculate daydepth, don't need the time24h timestamps.
bin_duration = 86400.0 # number of seconds in a day
daydepth, mask_nonzero = anchor_bin(time15s, meandepth, bin_duration, 'data')
# downstream data products require the mask_nonzero variable, so pass
# it as an output argument so that it doesn't need to be recalculated.
return daydepth, mask_nonzero
def calc_meandepth_plus(timestamp, botpres):
"""
Description:
Worker function to calculate the botsflu data product meandepth plus
additional variables required to calculate other botsflu data products
downstream from meandepth.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
time15s, meandepth, mask_nonzero = calc_meandepth_plus(timestamp, botpres)
where
time15s = TIME15S [sec since 01-01-1900]
meandepth = BOTSFLU-MEANDEPTH_L2 [m]
mask_nonzero = boolean of positions of non-empty bins in the original data
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
The DPS specifies that atmospheric pressure not be subtracted from the
L1 pressure data even though its units are [psia].
The DPS convention is that depths are negative, so that to detide the
pressure record, the predicted tide is added to the negative depths.
This function was written as a way to eliminate the execution of time
consuming duplicate calculations in the botsflu coding within the
OOI CI architecture constraints.
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# The pressure values do have units of psia. However, historically at these sites
# atmospheric pressure has *not* been subtracted when converting the pressure data
# to depth. Therefore the DPS authors do not want atmospheric pressure subtracted
# in the DPA. To emphasize this, I have created the variable atm_press_psi and set
# it to 0.
atm_press_psi = 0.0
psi_2_depth = -0.67 # psi to depth in meters
bin_duration = 15.0 # seconds
time15s, meanpres, mask_nonzero = anchor_bin(timestamp, botpres, bin_duration, 'both')
# look up tide data
tide = prs_botsflu_predtide(time15s)
# de-tide
meandepth = ((meanpres - atm_press_psi) * psi_2_depth) + tide
# downstream data products require the time15s and mask_nonzero variables,
# so pass these as output arguments so that they won't have to be recalculated.
return time15s, meandepth, mask_nonzero
def calculate_sliding_means(data, window_size):
"""
Description:
Calculates time-centered means using digital convolution for the
BOTSFLU data product 10MINRATE.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
means = calculate_sliding_means(data, window_size)
where
means = 1D array of sliding means
data = 1D array of data
window_size = even integer
"""
kk = np.ones(window_size) / window_size
means = np.convolve(data, kk, 'same')
# matlab and numpy behave differently for even window sizes, so
means = np.roll(means, -1)
# in this application, window_size is always even.
    hfwin = window_size // 2 - 1  # integer division keeps the slice indices below ints
# nan out data with boundary effects at edges.
# the values in the array means will be of type float because kk is a float,
# so that the np.nan assignment statements will work as intended.
means[0:hfwin] = np.nan
means[-hfwin-1:] = np.nan
return means
def calculate_sliding_slopes(data, window_size):
"""
Description:
Calculates backwards-looking sliding slopes using Moore_Penrose
pseudo-inverse matrices; required for the BOTSFLU data products
4WKRATE and 8WKRATE.
Implemented by:
2015-01-13: <NAME>. Initial code.
Usage
slopes = calculate_sliding_slopes(data, window_size)
where
slopes = 1D array of sliding slopes
data = 1D array of data
window_size = integer
Notes
Code lifted from <NAME>'s response on Matlab Central (thread 49181)
to a query on how to calculate vectorized rolling regressions. For a more
generalized application of the pinv\filter method, see D'Errico's 2007 code
for movingslope.m on Matlab Central's file exchange (16997).
The slopes are backwards-looking, not centered. The first non-nan value occurs
at index window_size, and is the slope of a regression of the first (window_size
+ 1) points.
"""
column1 = np.ones((window_size, 1))
column2 = -np.arange(float(window_size)).reshape(-1, 1)
    X = np.hstack((column1, column2))
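    # NOTE: the remainder of this routine is truncated in the source. The
    # completion below is a sketch of the pinv\filter method described in
    # the Notes (backwards-looking regression slopes from the rows of the
    # Moore-Penrose pseudo-inverse), not necessarily the original code; per
    # the Notes the original may use windows of (window_size + 1) points,
    # which would shift the first valid index by one.
    pinv_X = np.linalg.pinv(X)
    # row 1 of the pseudo-inverse weights a window of data (newest point
    # first, matching column2 = 0, -1, -2, ...) to give its regression slope.
    slopes = np.zeros(data.size) + np.nan
    # np.convolve reverses the kernel, so each 'valid' output weights the
    # newest sample of its window by pinv_X[1, 0], as required.
    slopes[window_size - 1:] = np.convolve(data, pinv_X[1, :], mode='valid')
    return slopes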
import numpy as np
from scipy import interpolate
from sklearn.model_selection import KFold
def evaluate(distances, labels, nrof_folds=10):
thresholds = np.arange(0, 4, 0.01)
tpr, fpr, accuracy, best_thresholds = calculate_roc(thresholds, distances,
labels, nrof_folds=nrof_folds)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds, distances,
labels, 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far, best_thresholds
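# illustrative usage: `distances` is a 1D array of pair distances and
# `labels` a same-length boolean array marking genuine pairs, e.g.
#     tpr, fpr, acc, val, val_std, far, thr = evaluate(distances, labels)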
def calculate_roc(thresholds, distances, labels, nrof_folds=10):
nrof_pairs = min(len(labels), len(distances))
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, distances[train_set], labels[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, distances[test_set], labels[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], distances[test_set], labels[test_set])
tpr = np.mean(tprs,0)
fpr = np.mean(fprs,0)
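    # note: the threshold returned below is the best training threshold of
    # the *last* fold only, a quirk of the FaceNet-style evaluation code
    # this appears to derive from; per-fold test accuracies are in `accuracy`.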
return tpr, fpr, accuracy, thresholds[best_threshold_index]
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
def calculate_val(thresholds, distances, labels, far_target=1e-3, nrof_folds=10):
nrof_pairs = min(len(labels), len(distances))
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
    indices = np.arange(nrof_pairs)
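    # NOTE: the remainder of this function is truncated in the source. The
    # completion below follows the widely used FaceNet-style evaluation code
    # this module appears to derive from; treat it as a sketch rather than
    # the verbatim original.
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        # find the threshold that gives FAR = far_target on the train split
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, distances[train_set], labels[train_set])
        if np.max(far_train) >= far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, distances[test_set], labels[test_set])
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean

def calculate_val_far(threshold, dist, actual_issame):
    # validation rate (VAL) and false accept rate (FAR) at a fixed threshold
    predict_issame = np.less(dist, threshold)
    true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
    false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    n_same = np.sum(actual_issame)
    n_diff = np.sum(np.logical_not(actual_issame))
    val = 0 if (n_same == 0) else float(true_accept) / float(n_same)
    far = 0 if (n_diff == 0) else float(false_accept) / float(n_diff)
    return val, far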
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Model an instrument response for spectroscopic simulations.
An instrument model is usually initialized from a configuration used to create
a simulator and then accessible via its ``instrument`` attribute, for example:
>>> import specsim.simulator
>>> simulator = specsim.simulator.Simulator('test') # doctest: +IGNORE_OUTPUT
>>> print(np.round(simulator.instrument.fiber_diameter, 1))
107.0 um
See :doc:`/api` for examples of changing model parameters defined in the
configuration. No attributes can be changed after a simulator has
been created. File a github issue if you would like to change this.
An :class:`Instrument` includes one or more
:class:`Cameras <specsim.camera.Camera>`.
"""
from __future__ import print_function, division
import numpy as np
import os.path
import scipy.interpolate
import scipy.integrate
import astropy.constants
import astropy.units as u
import specsim.camera
import specsim.fastfiberacceptance
import specsim.config
class Instrument(object):
"""Model the instrument response of a fiber spectrograph.
A spectrograph can have multiple :mod:`cameras <specsim.camera>` with
different wavelength coverages. Objects representing each camera are
contained in a list accessible from our ``cameras`` attribute, which will
be in order of increasing effective wavelength.
No instrument attributes can be changed after an instrument has been
created. Create a github issue if you would like to change this.
Parameters
----------
name : str
Descriptive name of this instrument.
wavelength : astropy.units.Quantity
Array of wavelength bin centers where the instrument response is
calculated, with units.
fiberloss_method : str
Must be "table" or "galsim" or "fastsim". Specifies how fiber acceptance fractions
will be loaded or calculated.
fiber_acceptance_dict : dict or None
Dictionary of fiber acceptance fractions tabulated for different
source models, with keys corresponding to source model names.
Ignored when fiberloss_method is "galsim".
fast_fiber_acceptance : specsim.fastfiberacceptance.FastFiberAcceptance or None
Initialized instance to use when fiberloss_method is "fastsim".
Ignored for other values of fiberloss_method.
fiberloss_num_wlen : int
Number of wavelengths where the fiberloss fraction should be tabulated
for interpolation. Ignored when fiberloss_method is not "galsim".
fiberloss_num_pixels : int
Number of pixels used to subdivide the fiber diameter for
numerical convolution and integration calculations.
Ignored when fiberloss_method is not "galsim".
blur_function : callable
Function of field angle and wavelength that returns the corresponding
RMS blur in length units (e.g., microns).
offset_function : callable
Function of focal-plane position (x,y) in angular units and wavelength
that returns the corresponding radial centroid offset in length
units (e.g., microns).
cameras : list
List of :class:`specsim.camera.Camera` instances representing the
camera(s) of this instrument.
primary_mirror_diameter : astropy.units.Quantity
Diameter of the primary mirror, with units.
obscuration_diameter : astropy.units.Quantity
Diameter of a central obscuration of the primary mirror, with units.
support_width : astropy.units.Quantity
Width of the obscuring supports, with units.
fiber_diameter : astropy.units.Quantity
Physical diameter of the simulated fibers, with units of length.
Converted to an on-sky diameter using the plate scale.
field_radius : astropy.units.Quantity
Maximum radius of the field of view in length units measured at
the focal plane. Converted to an angular field of view using the
plate scale.
radial_scale : callable
Callable function that returns the plate scale in the radial
(meridional) direction (with appropriate units) as a function of
focal-plane distance (with length units) from the boresight.
azimuthal_scale : callable
Callable function that returns the plate scale in the azimuthal
(sagittal) direction (with appropriate units) as a function of
focal-plane distance (with length units) from the boresight.
"""
def __init__(self, name, wavelength, fiberloss_method,
fiber_acceptance_dict, fast_fiber_acceptance, fiberloss_num_wlen,
fiberloss_num_pixels, blur_function, offset_function, cameras,
primary_mirror_diameter, obscuration_diameter, support_width,
fiber_diameter, field_radius, radial_scale, azimuthal_scale):
self.name = name
self._wavelength = wavelength
self.fiber_acceptance_dict = fiber_acceptance_dict
self.fast_fiber_acceptance = fast_fiber_acceptance
# Both fiber_acceptance_dict and fast_fiber_acceptance must be initialized
# before assigning to fiberloss_method (since its setter checks their values).
self.fiberloss_method = fiberloss_method
self.fiberloss_num_wlen = fiberloss_num_wlen
self.fiberloss_num_pixels = fiberloss_num_pixels
self._blur_function = blur_function
self._offset_function = offset_function
self.cameras = cameras
self.primary_mirror_diameter = primary_mirror_diameter
self.obscuration_diameter = obscuration_diameter
self.support_width = support_width
self.fiber_diameter = fiber_diameter
self.field_radius = field_radius
self.radial_scale = radial_scale
self.azimuthal_scale = azimuthal_scale
# Calculate the effective area of the primary mirror.
D = self.primary_mirror_diameter
obs = self.obscuration_diameter
support_area = 0.5*(D - obs) * self.support_width
self.effective_area = (
np.pi * ((0.5 * D) ** 2 - (0.5 * obs) ** 2) - 4 * support_area)
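        # e.g. (illustrative numbers only): D = 4 m, obs = 1 m and 0.1 m wide
        # supports give pi*(2**2 - 0.5**2) - 4*(0.5*3*0.1) ~= 11.18 m**2.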
# Tabulate the mapping between focal plane radius and boresight
# opening angle by integrating the radial plate scale.
# Use mm and radians as the canonical units.
self._radius_unit, self._angle_unit = u.mm, u.rad
radius = np.linspace(
0., self.field_radius.to(self._radius_unit).value, 1000)
dradius_dangle = self.radial_scale(radius * self._radius_unit).to(
self._radius_unit / self._angle_unit).value
angle = scipy.integrate.cumtrapz(
1. / dradius_dangle, radius, initial=0.)
# Record the maximum field angle corresponding to our field radius.
self.field_angle = angle[-1] * self._angle_unit
# Build dimensionless linear interpolating functions of the
# radius <-> angle map using the canonical units.
self._radius_to_angle = scipy.interpolate.interp1d(
radius, angle, kind='linear', copy=True, bounds_error=True)
self._angle_to_radius = scipy.interpolate.interp1d(
angle, radius, kind='linear', copy=True, bounds_error=True)
# Calculate the energy per photon at each wavelength.
hc = astropy.constants.h * astropy.constants.c
energy_per_photon = (hc / self._wavelength).to(u.erg)
# Calculate the rate of photons incident on the focal plane per
# wavelength bin per unit spectral flux density. The fiber acceptance
# fraction is not included in this calculation.
wavelength_bin_size = np.gradient(self._wavelength)
self.photons_per_bin = (
self.effective_area * wavelength_bin_size / energy_per_photon
).to((u.cm**2 * u.Angstrom) / u.erg)
wave_mid = []
for i, camera in enumerate(self.cameras):
wave_min, wave_max = camera.wavelength_min, camera.wavelength_max
wave_mid.append(0.5 * (wave_min + wave_max))
if i == 0:
self.wavelength_min = wave_min
self.wavelength_max = wave_max
else:
self.wavelength_min = min(self.wavelength_min, wave_min)
self.wavelength_max = max(self.wavelength_max, wave_max)
# Sort cameras in order of increasing wavelength.
self.cameras = [x for (y, x) in sorted(zip(wave_mid, self.cameras))]
@property
def fiberloss_method(self):
"""The current method used to calculate fiber acceptance fractions.
"""
return self._fiberloss_method
@fiberloss_method.setter
def fiberloss_method(self, fiberloss_method):
"""Set the method used to calculate fiber acceptance fractions.
Must be one of "table" or "galsim" or "fastsim".
"""
if fiberloss_method not in ('table', 'galsim', 'fastsim' ):
raise ValueError(
'fiberloss_method must be "table" or "galsim" or "fastsim".')
if fiberloss_method == 'table' and self.fiber_acceptance_dict is None:
raise ValueError('Missing required instrument.fiberloss.table.')
if fiberloss_method == 'fastsim' and self.fast_fiber_acceptance is None:
raise ValueError(
'Missing required instrument.fiberloss.fast_fiber_acceptance_path.')
if fiberloss_method == 'galsim':
try:
import galsim
except ImportError:
raise ValueError('The galsim package is not installed.')
self._fiberloss_method = fiberloss_method
def field_radius_to_angle(self, radius):
"""Convert focal plane radius to an angle relative to the boresight.
The mapping is derived from the radial (meridional) plate scale
function :math:`dr/d\\theta(r)` via the integral:
.. math::
\\theta(r) = \\int_0^{r} \\frac{dr}{dr/d\\theta(r')}\\, dr'
The input values must be within the field of view.
Use :meth:`field_angle_to_radius` for the inverse transform.
Parameters
----------
radius : astropy.units.Quantity
One or more radius values where the angle should be calculated.
Values must be between 0 and ``field radius``.
Returns
-------
astropy.units.Quantity
Opening angle(s) relative to the boresight corresponding to
the input radius value(s).
Raises
------
ValueError
One or more input values are outside the allowed range.
"""
return self._radius_to_angle(
radius.to(self._radius_unit)) * self._angle_unit
def field_angle_to_radius(self, angle):
"""Convert focal plane radius to an angle relative to the boresight.
The mapping :math:`r(\\theta)` is calculated by numerically inverting
the function :math:`\\theta(r)`.
The input values must be within the field of view.
Use :meth:`field_radius_to_angle` for the inverse transform.
Parameters
----------
angle : astropy.units.Quantity
One or more angle values where the radius should be calculated.
Values must be between 0 and ``field_angle``.
Returns
-------
astropy.units.Quantity
Radial coordinate(s) in the focal plane corresponding to the
input angle value(s).
Raises
------
ValueError
One or more input values are outside the allowed range.
"""
return self._angle_to_radius(
angle.to(self._angle_unit)) * self._radius_unit
def get_blur_rms(self, wavelength, angle):
"""Get the instrument PSF blur at the specified field angle.
Parameters
----------
wavelength : astropy.units.Quantity
Wavelength where the blur should be calculated.
angle : astropy.units.Quantity
Angular separation from the field center.
Returns
-------
astropy.units.Quantity
RMS blur of the instrument at this wavelength and field radius
in length units.
"""
return self._blur_function(angle, wavelength)
def get_centroid_offset(self, angle_x, angle_y, wavelength):
"""Get the instrument centroid offset at the specified field angles.
This method does not make any assumptions about how the x and y
axes are defined, as long as (0, 0) is the field center.
Note that the focal-plane position is input as angles relative to
the field center, while the offsets are returned as lengths relative
to the nominal fiber center.
Parameters
----------
angle_x : astropy.units.Quantity
Angular separation from the field center along x.
angle_y : astropy.units.Quantity
Angular separation from the field center along y.
wavelength : astropy.units.Quantity
Wavelength where the blur should be calculated.
Returns
-------
tuple
Tuple (dx, dy) of astropy quantities giving the spot centroid
offset components at this wavelength and position in the focal
plane. Offsets are given in length units, e.g., microns.
"""
return self._offset_function(angle_x, angle_y, wavelength)
def get_focal_plane_optics(self, focal_x, focal_y, wlen_grid):
"""Calculate the optical parameters at a set of focal-plane positions.
Uses :meth:`get_centroid_offset`, :meth:`get_blur_rms`, and
:meth:`field_radius_to_angle` to calculate the optics at each focal
plane location.
This method does not make any assumptions about how the x and y
axes are defined, as long as (0, 0) is the field center. However
radial symmetry is broken by the (dx, dy) offsets calculated by
:meth:`get_centroid_offset`.
Note that units are required for the input arrays and included with
the returned arrays.
Parameters
----------
focal_x : :class:`astropy.units.Quantity`
1D array of X coordinates in the focal plane relative to the
boresight, with length units.
focal_y : :class:`astropy.units.Quantity`
1D array of Y coordinates in the focal plane relative to the
boresight, with length units.
wlen_grid : :class:`astropy.units.Quantity`
1D array of wavelengths where parameters should be tabulated,
with length units.
Returns
-------
tuple
Tuple of arrays scale, blur, offset with shapes (N,2), (N,M) and
(N,M,2) where N is the size of the 1D input (x,y) arrays, M is
the size of the input wavelength grid, and axes of length 2
correspond to radial and azimuthal axes (not the input x,y!).
All output arrays have units.
"""
# Check for valid units on the input arrays.
try:
focal_x_mm = focal_x.to(u.mm).value
focal_y_mm = focal_y.to(u.mm).value
wlen_grid_ang = wlen_grid.to(u.Angstrom).value
except astropy.units.UnitConversionError:
raise ValueError('Input arrays have invalid units.')
except AttributeError:
raise ValueError('Input arrays are missing required units.')
# Check for expected input array shapes.
if len(focal_x_mm.shape) != 1 or len(wlen_grid_ang.shape) != 1:
raise ValueError('Input arrays must be 1D.')
if focal_x_mm.shape != focal_y_mm.shape:
raise ValueError('Input (x,y) arrays have different shapes.')
# Allocate output arrays.
n_xy = len(focal_x_mm)
n_wlen = len(wlen_grid_ang)
scale = np.empty((n_xy, 2))
blur = np.empty((n_xy, n_wlen))
offset = np.empty((n_xy, n_wlen, 2))
# Convert x, y offsets in length units to field angles.
focal_r = np.sqrt(focal_x**2+focal_y**2)
angle_r = self.field_radius_to_angle(focal_r)
angle_x = np.zeros(focal_x.shape) * angle_r.unit
angle_y = np.zeros(focal_y.shape) * angle_r.unit
positive_radius = focal_r>0
angle_x[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_x[positive_radius]
angle_y[positive_radius] = (
angle_r[positive_radius] / focal_r[positive_radius]
) * focal_y[positive_radius]
# Calculate the radial and azimuthal plate scales at each location.
scale[:, 0] = self.radial_scale(focal_r).to(u.um / u.arcsec).value
scale[:, 1] = self.azimuthal_scale(focal_r).to(u.um / u.arcsec).value
# Calculate the transformations between polar and Cartesian coordinates.
        phi = np.arctan2(focal_y_mm, focal_x_mm)
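        cos_phi = np.cos(phi)
        sin_phi = np.sin(phi)
        # NOTE: the remainder of this method is truncated in the source. The
        # loop below is a sketch of the tabulation described in the docstring
        # (blur and centroid offset per wavelength, with the x/y offset
        # rotated into radial/azimuthal components); it is not necessarily
        # the original implementation.
        for i, wlen in enumerate(wlen_grid):
            blur[:, i] = self.get_blur_rms(wlen, angle_r).to(u.um).value
            dx, dy = self.get_centroid_offset(angle_x, angle_y, wlen)
            dx_um = dx.to(u.um).value
            dy_um = dy.to(u.um).value
            # rotate (x, y) offsets into (radial, azimuthal) components.
            offset[:, i, 0] = cos_phi * dx_um + sin_phi * dy_um
            offset[:, i, 1] = -sin_phi * dx_um + cos_phi * dy_um
        return scale * (u.um / u.arcsec), blur * u.um, offset * u.um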
# This Python file uses the following encoding: utf-8
# The upper line is needed for one comment in this module.
""" Calculate performance measures from classification results and store them
All performance sink nodes interface to the
:mod:`~pySPACE.resources.dataset_defs.metric` datasets, where the final
metric values are calculated.
These results can be put together using the
:class:`~pySPACE.resources.dataset_defs.performance_result.PerformanceResultSummary`.
"""
import os
import copy
import warnings
import cPickle
import numpy
import timeit
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.tools.filesystem import create_directory
from pySPACE.resources.dataset_defs.metric import metricdict, \
BinaryClassificationDataset, \
MultinomialClassificationDataset,\
RegressionDataset
import logging
class PerformanceSinkNode(BaseNode):
""" Calculate performance measures from standard prediction vectors and store them
It takes all classification vectors that are passed on to it
from a continuous classifier, calculates the performance measures and
stores them. The results can be later on collected and merged
into one tabular with the
:class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.
This one can be read manually or it can be
visualized with a gui.
.. note:: FeatureVectorSinkNode was the initial model of this node.
**Parameters**
:evaluation_type:
Define type of incoming results to be processed.
Currently ``binary``
(:class:`~pySPACE.resources.dataset_defs.metric.BinaryClassificationDataset`)
and ``multinomial``
(:class:`~pySPACE.resources.dataset_defs.metric.MultinomialClassificationDataset`)
classification (also denoted as ``multiclass`` classification) and ``regression`` (even for n-dimensional output)
(:class:`~pySPACE.resources.dataset_defs.metric.RegressionDataset`)
metrics can be calculated.
For the multinomial and regression case
several parameters are not yet important.
These are:
* ir_class
* save_roc_points
* calc_AUC
* calc_soft_metrics
* calc_loss
* sum_up_splits
.. warning:: Multinomial classification and regression have not yet
been used often enough with pySPACE and require
additional testing.
(*optional, default: "binary"*)
:ir_class:
The class name (as string) for which IR statistics are to be output.
(*recommended, default: 'Target'*)
:sec_class:
For binary classification the second class (not the *ir_class*)
can be specified. Normally it is detected by default and not
required, except for one_vs_REST scenarios,
where it can not be determined.
(*optional, default: None*)
:save_individual_classifications:
If True, for every processed split a pickle file will be generated
that contains the numerical classification result (cresult) for every
individual window along with the estimated class label (c_est), the
true class label (c_true) and the number of features used (nr_feat).
The result is a list whose elements correspond to a single window and
have the following shape:
::
[ [c_est, cresult, nr_feat], c_true ]
(*optional, default: False*)
:save_roc_points:
If True, for every processed split a pickle file will be generated
that contains a list of tuples (=points) increasing by FP rate, that
can be used to plot a Receiver Operator Curve (ROC) and a list, that
contains the actually used point in the ROC space together with (0|0)
and (1|1). The result has the following shape:
::
( [(fp_rate_1,tp_rate_1), ... ,(fp_rate_n,tp_rate_n)],
[(0.0,0.0), (fp_rate, tp_rate), (1.0,1.0)])
For comparing ROC curves, you can use the analysis GUI
(*performance_results_analysis.py*).
(*optional, default: False*)
:weight:
weight is the weight for the weighted accuracy. For many scenarios
a relevant performance measure is a combination of
True-Positive-Rate (TPR) and True-Negative-Rate (TNR), where one of the
two might be of higher importance than the other, and thus gets a
higher weight. Essentially, the weighted accuracy is
calculated by
.. math:: \\text{Weighted\_accuracy} = weight \\cdot TPR + (1 - weight) \\cdot TNR
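            For example, with weight = 0.75, TPR = 0.9 and TNR = 0.6 the
            weighted accuracy is 0.75*0.9 + 0.25*0.6 = 0.825.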
If this parameter is not set, the value equals the balanced accuracy.
In the case of `multinomial` classification, this parameter
has to be a dictionary.
(*optional, default: 0.5*)
:measure_times:
measure the average and maximum time that is needed for the processing
of the data between the last sink node in the node chain and this node.
(*optional, default: True*)
:calc_soft_metrics:
integrate uncertainty of classifier into metric
prediction value is projected to interval [-1,1]
(*optional, default: False*)
:calc_train:
Switch for calculating metrics on the training data
(*optional, default: True*)
:calc_AUC:
Calculate the AUC metric
(*optional, default: True*)
:calc_loss:
Integrates the calculated losses into the final csv-file.
(L1, L2), (LDA, SVM, RMM), (restricted, unrestricted)
and (equal weighted, *balanced*) losses are
calculated in all combinations, resulting in 24 entries.
(*optional, default: True*)
:loss_restriction:
Maximum value of the single loss values.
Everything above is reduced to the maximum.
(*optional, default: 2*)
:sum_up_splits:
If you use a CV-Splitter in your node chain, the performance sink adds up
the basic metrics and calculates confusion matrix metrics with these
values. The other metrics are averaged.
So a lot of more testing examples are relevant for the calculation.
(*optional, default: False*)
:dataset_pattern:
If the __Dataset__ is of the form "X_Y_Z", then this pattern can be
specified with this parameter. The different values X, Y, Z will then
appear in corresponding columns in the results.csv. Example: If the
datasets are of the form "NJ89_20111128_3", and one passes the
dataset_pattern "subject_date_setNr", then the results.csv will have
the columns __Subject__, __Date__ and __SetNr__ with the corresponding
values parsed (note the added underscores and capitalized first letter).
(*optional, default: None*)
:decision_boundary:
If your decision boundary is not at zero you should specify this for
the calculation of metrics depending on the prediction values.
Probabilistic classifiers often have a boundary at 0.5.
(*optional, default: 0.0*)
:save_trace:
Generates a table which contains a confusion matrix over time/samples.
There are two types of traces: short traces and long traces.
The short traces contain only the information, if a classification
was a TP, FN, FP or TN. The long traces furthermore contain
loss values and are saved as a dictionary.
To save only short traces (for, e.g. performance reasons),
set save_trace to ``short``.
To save long and short traces, set save_trace to True.
The encoding in trace is:
:TP: 0
:FN: 1
:FP: 2
:TN: 3
(*optional, default: False*)
**Exemplary Call**
.. code-block:: yaml
-
node : Classification_Performance_Sink
parameters :
ir_class : "Target"
weight : 0.5
:input: PredictionVector
:output: ClassificationDataset
:Author: <NAME> (<EMAIL>)
:Created: 2012/08/02
"""
input_types = ["PredictionVector"]
def __init__(self, classes_names=[], ir_class="Target", sec_class=None,
save_individual_classifications=False, save_roc_points=False,
weight=0.5, measure_times=True, calc_soft_metrics=False,
sum_up_splits=False, dataset_pattern=None, calc_AUC=True,
calc_loss=True, calc_train=True, save_trace=False,
decision_boundary=None, loss_restriction=2,
evaluation_type="binary",
**kwargs):
super(PerformanceSinkNode, self).__init__(**kwargs)
if save_roc_points:
calc_AUC = True
if evaluation_type in ["multinomial", "multiclass"]:
evaluation_type = "multinomial"
save_trace = False
save_roc_points = False
calc_AUC = False
calc_loss = False
calc_soft_metrics = False
sum_up_splits = False
cc = MultinomialClassificationDataset(dataset_pattern=
dataset_pattern)
elif evaluation_type == "binary":
cc = BinaryClassificationDataset(dataset_pattern=dataset_pattern)
elif evaluation_type == "regression":
save_trace = False
save_roc_points = False
calc_AUC = False
calc_loss = False
calc_soft_metrics = False
sum_up_splits = False
cc = RegressionDataset(dataset_pattern=dataset_pattern)
store = \
save_individual_classifications or \
save_roc_points or \
self.store or \
save_trace
self.set_permanent_attributes(
ir_class=ir_class.strip(),
classification_dataset=cc,
classes_names=classes_names,
# determined later on for checks in binary classification
sec_class=sec_class,
weight=weight,
save_individual_classifications=save_individual_classifications,
save_roc_points=save_roc_points,
measure_times=measure_times,
calc_soft_metrics=calc_soft_metrics,
example=None,
sum_up_splits=sum_up_splits,
calc_AUC=calc_AUC,
calc_loss=calc_loss,
decision_boundary=decision_boundary,
loss_restriction=loss_restriction,
calc_train=calc_train,
save_trace=save_trace,
store=store,
evaluation_type=evaluation_type,
invert_classification=False)
def reset(self):
""" classification_dataset has to be kept over all splits """
# We have to create a temporary reference since we remove
# the self.permanent_state reference in the next step by overwriting
# self.__dict__
tmp = self.permanent_state
# reset should not delete classification dataset
# if you want to delete the dataset just do it explicitly.
tmp["classification_dataset"] = self.classification_dataset
self.__dict__ = copy.copy(tmp)
self.permanent_state = tmp
def is_trainable(self):
""" Return whether this node is trainable.
.. todo:: Check if return should be False and edit documentation
"""
# Though this node is not really trainable, it returns true in order
# to request the training data from previous notes.
return True
def is_supervised(self):
""" Return whether this node requires supervised training. """
return True
def _train(self, data, label):
# We do nothing
pass
def process_current_split(self):
""" Main processing part on test and training data of current split
Performance metrics are calculated for training and test data separately.
Metrics on training data help to detect errors in classifier construction
and to compare in how far it behaves the same way as on testing data.
The function only collects the data, measures execution times
and calls functions to update confusion matrices.
"""
################
### TRAINING ###
################
self._log("Processing training data",level=logging.INFO)
self.train_classification_outcome = []
self.training_time = 0
if self.measure_times:
start_time_stamp = timeit.default_timer()
for classification_vector, label in self.input_node.request_data_for_training(False):
if self.calc_train:
self.set_helper_parameters(classification_vector,label)
self.train_classification_outcome.append((classification_vector, label))
if self.measure_times:
stop_time_stamp = timeit.default_timer()
self.training_time = stop_time_stamp - start_time_stamp
if self.calc_train and self.evaluation_type == "binary" and not self.train_classification_outcome==[]:
if self.decision_boundary is None and self.train_classification_outcome[0][0].predictor.node_name in \
["PlattsSigmoidFitNode",
"LinearFitNode",
"SigmoidTransformationNode"]:
self.decision_boundary=0.5
elif self.decision_boundary is None:
self.decision_boundary=0
train_result = BinaryClassificationDataset.calculate_metrics(
classification_results=self.train_classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
invert_classification=self.invert_classification,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
time_periods=[],
calc_AUC=self.calc_AUC,calc_loss=self.calc_loss,
weight=self.weight,save_roc_points=self.save_roc_points,
decision_boundary=self.decision_boundary)
try:
train_metrics, self.train_R = train_result
except:
train_metrics = train_result
elif self.calc_train and self.evaluation_type == "multinomial":
train_metrics = MultinomialClassificationDataset.calculate_metrics(
classification_results=self.train_classification_outcome,
weight=self.weight, classes=self.classes_names)
elif self.calc_train and self.evaluation_type == "regression":
train_metrics = RegressionDataset.calculate_metrics(
regression_results=self.train_classification_outcome,
weight=self.weight)
elif not self.train_classification_outcome:
train_metrics = metricdict()
###############
### TESTING ###
###############
self._log("Processing testing data",level=logging.INFO)
# for saving the actual numerical classification results
self.classification_outcome = []
# class\guess ir sec
# ir_class: TP FN
# sec_class: FP TN
# initialization to measure execution speed
self.time_periods = []
if self.measure_times:
self.time_periods = []
start_time_stamp = timeit.default_timer()
self.example = None
for classification_vector, label in \
self.input_node.request_data_for_testing():
if self.measure_times:
stop_time_stamp = timeit.default_timer()
self.time_periods.append(stop_time_stamp - start_time_stamp)
self.set_helper_parameters(classification_vector,label)
self.classification_outcome.append((classification_vector, label))
# re-initialization of time before next item is requested
if self.measure_times:
start_time_stamp = timeit.default_timer()
if self.decision_boundary is None and \
len(self.classification_outcome) > 0 and \
self.classification_outcome[0][0].predictor.node_name in \
["PlattsSigmoidFitNode",
"LinearFitNode",
"SigmoidTransformationNode"]:
self.decision_boundary=0.5
elif self.decision_boundary is None:
self.decision_boundary=0
if self.evaluation_type == "binary":
result = BinaryClassificationDataset.calculate_metrics(
classification_results=self.classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
invert_classification=self.invert_classification,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
time_periods=self.time_periods,
calc_AUC=self.calc_AUC,calc_loss=self.calc_loss,
weight=self.weight,save_roc_points=self.save_roc_points,
decision_boundary=self.decision_boundary)
try:
metrics, self.R = result
except:
metrics = result
elif self.evaluation_type=="multinomial":
metrics = MultinomialClassificationDataset.calculate_metrics(
classification_results=self.classification_outcome,
weight=self.weight)
elif self.evaluation_type=="regression":
metrics = RegressionDataset.calculate_metrics(
regression_results=self.classification_outcome,
weight=self.weight)
# add the training time if training was done
if self.measure_times:
metrics["Training_time"] = self.training_time
try:
classifier_information = self.classification_outcome[0][0].\
predictor.classifier_information
except:
classifier_information=dict()
# add the training metrics
if self.calc_train:
skip_keys = classifier_information.keys()
for key,value in train_metrics.items():
if not key in skip_keys:
metrics["train_"+key] = value
self.classification_dataset.add_split(metrics,
train=False,
split=self.current_split,
run=self.run_number)
if self.save_trace:
self.trace, self.long_trace=self.calculate_classification_trace(
classification_results=self.classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
ir_class=self.ir_class,
sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
calc_loss=self.calc_loss,
decision_boundary=self.decision_boundary,
save_trace=self.save_trace)
self._log("Metrics added to dataset",level=logging.INFO)
def set_helper_parameters(self, classification_vector, label):
""" Fetch some node parameters from the classification vector """
# get an example for a classification vector for further analysis
if self.example is None:
self.example = classification_vector
if not self.evaluation_type == "binary":
return
try:
self.decision_boundary = \
classification_vector.predictor.classifier_information["decision_boundary"]
except:
pass
if self.decision_boundary is None and classification_vector.predictor.node_name in \
["PlattsSigmoidFitNode",
"LinearFitNode",
"SigmoidTransformationNode"]:
self.decision_boundary = 0.5
elif self.decision_boundary is None:
self.decision_boundary = 0
if (self.example.prediction > self.decision_boundary and
self.example.label == self.ir_class) or \
(self.example.prediction <= self.decision_boundary and not
self.example.label == self.ir_class):
self.invert_classification = False
elif self.evaluation_type == "binary":
self.invert_classification = True
warnings.warn(
"Your ir_class did not get the higher value " +
"from the classifier.\n " +
"Label %s, got value %f.\n" % (self.example.label,
self.example.prediction) +
"You should adjust that and " +
"maybe switch the given class_labels or add " +
"preserve_score: False to the " +
"Threshold_Optimization node! " +
"Furthermore you should check the parameter" +
": decision_boundary!")
if self.evaluation_type == "binary":
if self.sec_class is None:
p_label = classification_vector.label.strip()
if self.sec_class is None and not (p_label == self.ir_class):
self.sec_class = p_label
@classmethod
def calculate_classification_trace(cls,classification_results,
calc_soft_metrics=False,
ir_class="Target", sec_class=None,
loss_restriction=2.0,
calc_loss=False,
decision_boundary=0.0,
save_trace=True):
""" Calculate the classification trace, i.e. TN,TP,FN,FP for every sample
The trace entries are encoded for size reasons as short (trace) or
in a comprehensive version as dicts (long_trace)
The encoding in trace is:
:TP: 0
:FN: 1
:FP: 2
:TN: 3
:Author: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
:Returns: trace, long_trace
"""
trace = []
long_trace = []
for prediction_vector,label in classification_results:
if sec_class is None and not label == ir_class:
sec_class = label
confusion_matrix = metricdict(float)
BinaryClassificationDataset.update_confusion_matrix(prediction_vector,
label,calc_soft_metrics=calc_soft_metrics,
ir_class=ir_class, sec_class=sec_class,
confusion_matrix=confusion_matrix,
decision_boundary=decision_boundary)
#
if calc_loss and not save_trace == "short":
BinaryClassificationDataset.update_loss_values(classification_vector=prediction_vector,
label=label,
ir_class=ir_class, sec_class=sec_class,
loss_dict=confusion_matrix,
loss_restriction=loss_restriction)
if not save_trace == "short":
long_trace.append(confusion_matrix)
if confusion_matrix["True_positives"] == 1:
trace.append(0)
elif confusion_matrix["False_negatives"] == 1:
trace.append(1)
elif confusion_matrix["False_positives"] == 1:
trace.append(2)
elif confusion_matrix["True_negatives"] == 1:
trace.append(3)
else:
raise ValueError("At least one element in the confusion matrix should be 1")
return trace, long_trace
def get_result_dataset(self):
""" Return the result dataset """
if not self.sum_up_splits:
return self.classification_dataset
else:
self.classification_dataset.merge_splits()
return self.classification_dataset
def store_state(self, result_dir, index=None):
""" Stores additional information (classification_outcome, roc_points) in the *result_dir* """
if self.store:
node_dir = os.path.join(result_dir, self.__class__.__name__)
create_directory(node_dir)
if self.save_individual_classifications:
name = 'classification_outcome_sp%s.pickle' % self.current_split
result_file = open(os.path.join(node_dir, name), "wb")
# predictor is a reference to the actual classification node
# object. This can not be pickled! Therefore, replace the
# predictor attribute by the classification node's node_specs
for single_classification in self.classification_outcome:
single_classification[0].predictor = \
single_classification[0].predictor.node_specs
result_file.write(cPickle.dumps(self.classification_outcome, protocol=2))
result_file.close()
if self.save_roc_points:
name = 'roc_points_sp%s.pickle' % self.current_split
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(cPickle.dumps(self.R, protocol=2))
result_file.close()
if self.save_trace:
name = 'trace_sp%s.pickle' % self.current_split
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(cPickle.dumps(self.trace, protocol=2))
result_file.close()
if len(self.long_trace) > 0:
name = 'long_trace_sp%s.pickle' % self.current_split
result_file = open(os.path.join(node_dir, name), "wb")
result_file.write(cPickle.dumps(self.long_trace, protocol=2))
result_file.close()
class LeaveOneOutSinkNode(PerformanceSinkNode):
""" Request the leave one out metrics from the input node
**Parameters**
see: :class:`PerformanceSinkNode`
**Exemplary Call**
.. code-block:: yaml
-
node : LOO_Sink
parameters :
ir_class : "Target"
"""
def process_current_split(self):
""" Get training results and input node metrics """
### TRAINING ### # code copy till main part
self.train_classification_outcome = []
if self.measure_times:
start_time_stamp = timeit.default_timer()
for classification_vector, label in \
self.input_node.request_data_for_training(False):
if self.calc_train:
self.set_helper_parameters(classification_vector,label)
self.train_classification_outcome.append((classification_vector, label))
if self.measure_times:
stop_time_stamp = timeit.default_timer()
self.training_time = stop_time_stamp - start_time_stamp
if self.calc_train:
train_result = BinaryClassificationDataset.calculate_metrics(
classification_results=self.train_classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
invert_classification=self.invert_classification,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
time_periods=[],
calc_AUC=self.calc_AUC, calc_loss=self.calc_loss,
weight=self.weight, save_roc_points=self.save_roc_points,
decision_boundary=self.decision_boundary)
try:
train_metrics, self.train_R = train_result
except (TypeError, ValueError):
# calculate_metrics returned only the metrics (no ROC points)
train_metrics = train_result
######################### Main Part #########################
try:
metrics = copy.deepcopy(self.train_classification_outcome[0][0].predictor.loo_metrics)
except AttributeError:
warnings.warn("Input node does not provide LOO metrics.")
metrics = metricdict(float)
#############################################################
# add the training time #Code copy from here
if self.measure_times:
metrics["Training_time"] = self.training_time
# add the training metrics
if self.calc_train:
try:
classifier_information = \
self.train_classification_outcome[0][0].predictor.\
classifier_information
except (AttributeError, IndexError):
classifier_information = dict()
skip_keys = classifier_information.keys()
for key, value in train_metrics.items():
if key not in skip_keys:
metrics["train_" + key] = value
self.classification_dataset.add_split(metrics,
train=False,
split=self.current_split,
run=self.run_number)
class SlidingWindowSinkNode(PerformanceSinkNode):
""" Calculate and store performance measures from classifications of sliding windows
This node inherits most of its functionality from *PerformanceSinkNode*.
Thus, for parameter description of super class parameters see documentation
of *PerformanceSinkNode*.
Additionally the following functionality is provided:
1) The definition of uncertain areas, which are excluded in the metrics
calculation process, are possible. This is useful for sliding window
classification, i.e. if the true label is not known in each sliding step.
2) It is possible to label the test data only now. For that an epoch signal
(e.g. a movement marker window) must be specified.
3) Instead of excluding sliding windows from classifier evaluation, the
'true' label function shape (a step function, which is zero for the
negative class and one for the positive class) can be somehow fit in the
uncertain range. At the moment there is only one way for doing this:
* from_right_count_negatives: Find the point where prediction of the
negative class starts by searching backwards
in time. There can be specified how many
'outliers' are ignored, i.e. how stable the
prediction has to be.
**Parameters**
:uncertain_area:
A list of tuples of the lower and the upper time value (in ms) for which
no metrics calculation is done. The values should be given with respect
to the last window of an epoch, i.e. sliding window series (which has
time value zero).
If additionally *determine_labels* is specified then the first tuple of
*uncertain_area* describes the bounds in which the label-change-point is
determined. The lower bound should be the earliest time point when the
detection makes sense; the upper bound should be the earliest time point
when there MUST BE a member of the positive class.
(*optional, default: None*)
:sliding_step:
The time (in ms) between two consecutive windows.
(*optional, default: 50*)
:determine_labels:
If specified the label-change-point (index where the class label changes
from negative to positive class) is determined for every epoch. This is
done via counting the occurrence of negative classified sliding windows
from the index point where the positive class is sure
(uncertain_area[1]) to the index point where the negative class is sure
(uncertain_area[0]) If *determine_labels* instances were found in
consecutively windows the label-change-point is has been reached.
If *determine_labels* > 1, the methods accounts for outliers.
.. note:: Using this option makes it hard to figure out to which true
class errors pertain (since it is somehow arbitrary). You
should be careful which metric you analyze for performance
evaluation (different class instance costs can't be modeled).
:epoch_signal:
The class name (label) of the event that marks the end of an epoch,
e.g. the movement. This can be used when null_marker windows (of an
unknown class) and a signal window which marks the event were cut out.
With respect to this event the former windows will be relabeled
according to *classes_names*.
(*optional, default: None*)
:epoch_eval:
If True, evaluation is done per epoch, i.e. per movement. Performance
metrics are averaged across epochs for every split. This option might
be necessary if the epochs have variable length, i.e. the class
distribution alters in every epoch.
(*optional, default: False*)
:save_score_plot:
If True a plot is stored which shows the average prediction value
against the time point of classification.
(*optional, default: False*)
:save_trial_plot:
If True a plot is stored which shows developing of the prediction
scores for each single trial.
(*optional, default: False*)
:save_time_plot:
If True a plot is stored which shows the predicted labels for all
trials across time.
(*optional, default: False*)
:sort:
If True the data has to be sorted according to the time (encoded in the
tag attribute. Be aware that this only makes sense for data sets with
unique time tags.
(*optional, default: False*)
:unused_win_defs:
List of window definition names which shall not be used for evaluation.
(*optional, default: []*)
**Exemplary Call**
.. code-block:: yaml
-
node : Sliding_Window_Performance_Sink
parameters :
ir_class : "LRP"
classes_names : ['NoLRP','LRP']
uncertain_area : ['(-600,-350)']
calc_soft_metrics : True
save_score_plot : True
:input: PredictionVector
:output: ClassificationCollection
:Author: <NAME> (<EMAIL>)
:Created: 2011/01/23
"""
def __init__(self,
uncertain_area=None, sliding_step=50, save_score_plot=False,
save_trial_plot=False, save_time_plot=False,
determine_labels=None, epoch_eval=False, epoch_signal=None,
sort=False, unused_win_defs=[], **kwargs):
if epoch_eval:
kwargs["save_roc_points"] = False
kwargs["calc_AUC"] = False
super(SlidingWindowSinkNode,self).__init__(**kwargs)
self.set_permanent_attributes(uncertain_area=uncertain_area,
sliding_step=sliding_step,
determine_labels=determine_labels,
epoch_signal=epoch_signal,
epoch_eval=epoch_eval,
save_score_plot=save_score_plot,
save_trial_plot=save_trial_plot,
save_time_plot=save_time_plot,
sort=sort,
unused_win_defs=unused_win_defs)
if self.store is False:
self.store = save_score_plot or save_trial_plot or save_time_plot
def process_current_split(self):
""" Compute for the current split of training and test data performance
one sliding windows.
"""
### TRAINING ###
# Code from classificationSinkNode #
self._log("Processing training data", level=logging.INFO)
self.train_classification_outcome = []
self.training_time = 0
if self.measure_times:
start_time_stamp = timeit.default_timer()
for classification_vector, label in self.input_node.request_data_for_training(False):
if classification_vector.specs["wdef_name"] in self.unused_win_defs:
continue
if self.calc_train:
self.set_helper_parameters(classification_vector,label)
self.train_classification_outcome.append((classification_vector,
label))
if self.measure_times:
stop_time_stamp = timeit.default_timer()
self.training_time = stop_time_stamp - start_time_stamp
# we assume that in the training case no sliding windows are used, i.e.,
# the windows have a known true label
if self.calc_train and self.evaluation_type == "binary" \
and self.train_classification_outcome:
if self.decision_boundary is None and \
self.train_classification_outcome[0][0].predictor.node_name in [
"PlattsSigmoidFitNode", "LinearFitNode", "SigmoidTransformationNode"]:
self.decision_boundary = 0.5
elif self.decision_boundary is None:
self.decision_boundary = 0
train_result = BinaryClassificationDataset.calculate_metrics(
classification_results=self.train_classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
invert_classification=self.invert_classification,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
time_periods=[],
calc_AUC=self.calc_AUC,calc_loss=self.calc_loss,
weight=self.weight,save_roc_points=self.save_roc_points,
decision_boundary=self.decision_boundary)
try:
train_metrics, self.train_R = train_result
except (TypeError, ValueError):
train_metrics = train_result
elif self.calc_train and self.evaluation_type == "multinomial":
train_metrics = MultinomialClassificationDataset.calculate_metrics(
classification_results=self.train_classification_outcome,
weight=self.weight)
elif self.calc_train and self.evaluation_type == "regression":
train_metrics = RegressionDataset.calculate_metrics(
regression_results=self.train_classification_outcome,
weight=self.weight)
elif not self.train_classification_outcome:
train_metrics = metricdict()
# TESTING
self._log("Processing testing data",level=logging.INFO)
# for saving the actual numerical classification results
self.classification_outcome = []
# class\guess ir sec
# ir_class: TP FN
# sec_class: FP TN
# initialization to measure execution speed
self.time_periods = []
if self.measure_times:
start_time_stamp = timeit.default_timer()
for classification_vector, label in \
self.input_node.request_data_for_testing():
if self.measure_times:
stop_time_stamp = timeit.default_timer()
self.time_periods.append(stop_time_stamp - start_time_stamp)
# parse 'tag': 'Epoch Start: 395772ms; End: 396772ms; Class: Target'
classification_vector.specs['start_time'] = \
float(classification_vector.tag.split(';')[0].split(':')[1].strip('ms'))
classification_vector.specs['end_time'] = \
float(classification_vector.tag.split(';')[1].split(':')[1].strip('ms'))
self.set_helper_parameters(classification_vector,label)
self.classification_outcome.append((classification_vector,label))
if self.measure_times:
start_time_stamp = timeit.default_timer()
if self.sort:
# sort classification vectors in time
self.classification_outcome.sort(key=lambda tupel:tupel[0].specs['start_time'])
if self.decision_boundary is None and len(self.classification_outcome) \
> 0 and self.classification_outcome[0][0].predictor.node_name \
in ["PlattsSigmoidFitNode", "LinearFitNode",
"SigmoidTransformationNode"]:
self.decision_boundary = 0.5
elif self.decision_boundary is None:
self.decision_boundary = 0
self.data_time = dict()
if self.epoch_signal is not None:
marker = 0
self.data_time[marker] = []
# split according to signal
for classification_vector, label in self.classification_outcome:
if label == self.epoch_signal:
marker += 1
self.data_time[marker] = []
else:
self.data_time[marker].append((classification_vector, label))
del self.data_time[marker]
else:
# split windows according to the time
last_window_end_time = 0.0
marker = -1
for classification_vector, label in self.classification_outcome:
if classification_vector.specs['start_time'] > \
last_window_end_time or \
classification_vector.specs['end_time'] < \
last_window_end_time:
marker += 1
self.data_time[marker] = [(classification_vector, label)]
elif classification_vector.specs['end_time'] == \
last_window_end_time + self.sliding_step:
self.data_time[marker].append((classification_vector,
label))
elif "bis-2000" in classification_vector.specs['wdef_name']:
marker += 1
self.data_time[marker] = [(classification_vector, label)]
else:
# TODO: overlapping epochs - what shall we do???
# may be store it with marker = -1 and handle it afterwards
self._log("Error: Overlapping epochs in Sink detected!",
level=logging.ERROR)
#raise Exception("Overlapping epochs in Sink detected!")
last_window_end_time = classification_vector.specs['end_time']
# delete uncertain classification outcomes or relabel data in
# self.classification_outcome and calculate the confusion matrix
self.classification_outcome = []
self.label_change_points = []
performance = None
for k in self.data_time.keys():
if self.determine_labels:
# calculate uncertain indices
nr_sliding_windows = len(self.data_time[k])
if self.uncertain_area is not None:
# integer division: times are given in ms relative to the last window
bound_indices = range(
nr_sliding_windows - abs(self.uncertain_area[0][0]) // self.sliding_step - 1,
nr_sliding_windows - abs(self.uncertain_area[0][1]) // self.sliding_step)
if len(self.uncertain_area) > 1:
uncertain_indices = []
for t in self.uncertain_area[1:]:
uncertain_indices.extend(range(
nr_sliding_windows - abs(t[0]) // self.sliding_step - 1,
nr_sliding_windows - abs(t[1]) // self.sliding_step))
else:
uncertain_indices = []
else: # if not specified, assume unbound
bound_indices = range(nr_sliding_windows)
uncertain_indices = []
label_change_point = self.from_right_count_negatives(
self.data_time[k], self.determine_labels, bound_indices)
self.label_change_points.append(label_change_point)
for index, (classification_vector, label) \
in enumerate(self.data_time[k]):
if index not in uncertain_indices:
if index < label_change_point: # assume neg class
self.classification_outcome.append(
(classification_vector, self.classes_names[0]))
else: # assume that last elem in trial has correct label
self.classification_outcome.append(
(classification_vector,
self.data_time[k][-1][1]))
else:
# calculate uncertain indices
if self.uncertain_area is not None:
nr_sliding_windows = len(self.data_time[0])
uncertain_indices = []
for t in self.uncertain_area:
uncertain_indices.extend(range(
nr_sliding_windows - abs(t[0]) // self.sliding_step - 1,
nr_sliding_windows - abs(t[1]) // self.sliding_step))
else:
uncertain_indices = []
for index, (classification_vector, label) \
in enumerate(self.data_time[k]):
if index not in uncertain_indices:
if self.epoch_signal:
if index < uncertain_indices[0]: # negative class
new_label = self.classes_names[0]
else: # assume last elem in trial has correct label
new_label = self.data_time[k][-1][1]
self.classification_outcome.append(
(classification_vector, new_label))
if self.epoch_eval:
result = self.get_result_metrics()
if performance is None:
performance = result
else: # combine with old performance
new_performance = result
performance = self.combine_perf_dict(performance,
new_performance, k+1)
self.classification_outcome = []
if not self.epoch_eval:
result = self.get_result_metrics()
try:
performance, self.R = result
except (TypeError, ValueError):
performance = result
# add the training time
if self.measure_times:
performance["Training_time"] = self.training_time
try:
classifier_information = self.classification_outcome[0][0].\
predictor.classifier_information
except (AttributeError, IndexError):
classifier_information = dict()
# add the training metrics
if self.calc_train:
skip_keys = classifier_information.keys()
for key, value in train_metrics.items():
if key not in skip_keys:
performance["train_" + key] = value
if self.determine_labels:
performance["~~Avg_Label_Change_Index~~"] = \
numpy.mean(self.label_change_points)
self.classification_dataset.add_split(performance, train=False,
split=self.current_split,
run=self.run_number)
if self.save_trace:
self.trace, self.long_trace = self.calculate_classification_trace(
classification_results=self.classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
calc_loss=self.calc_loss,
decision_boundary=self.decision_boundary,
save_trace=self.save_trace)
self._log("Metrics added to dataset",level=logging.INFO)
def get_result_metrics(self):
""" Calculate metrics based on evaluation type """
if self.evaluation_type == 'binary':
result = BinaryClassificationDataset.calculate_metrics(
classification_results=self.classification_outcome,
calc_soft_metrics=self.calc_soft_metrics,
invert_classification=self.invert_classification,
ir_class=self.ir_class, sec_class=self.sec_class,
loss_restriction=self.loss_restriction,
time_periods=self.time_periods, weight=self.weight,
calc_AUC=self.calc_AUC, calc_loss=self.calc_loss,
save_roc_points=self.save_roc_points,
decision_boundary=self.decision_boundary)
elif self.evaluation_type == 'multinomial':
result = MultinomialClassificationDataset.calculate_metrics(
classification_results=self.classification_outcome,
weight=self.weight)
elif self.evaluation_type == "regression":
result = RegressionDataset.calculate_metrics(
regression_results=self.classification_outcome,
weight=self.weight)
return result
def from_right_count_negatives(self, y, target_number, bounds):
""" Search the bounded y backwards in time for the first index at which
*target_number* consecutive windows carry the negative class label;
return index + target_number as the critical (label change) point. """
countNegatives = 0
countTotal = 0
for index in range(len(y) - 1, -1, -1):
if index not in bounds:
continue
countTotal += 1
if y[index][0].label == self.classes_names[0]:
countNegatives += 1
if countNegatives == target_number:
if countTotal == target_number:
return bounds[-1]
else:
return index + target_number
else:
countNegatives = 0
return bounds[0] + countNegatives
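# Illustrative sketch (not original code): the same backward scan on a plain
# list of labels. `find_change_point` is a hypothetical, simplified analogue
# (it ignores the bounds handling) shown only to make the counting concrete.
def find_change_point(labels, negative_label, target_number):
    count = 0
    for index in range(len(labels) - 1, -1, -1):
        if labels[index] == negative_label:
            count += 1
            if count == target_number:
                return index + target_number
        else:
            count = 0
    return count
# e.g. find_change_point(['n', 'n', 'p', 'n', 'p', 'p'], 'n', 2) == 2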
def combine_perf_dict(self, old_dict, new_dict, weight):
""" Combine the values of the dicts by a weighting (iterative) average
.. math:: \\frac{weight-1}{weight} \\cdot \\text{old\_dict} + \\frac{1}{weight} \\cdot \\text{new\_dict}
"""
return_dict = dict()
for key in old_dict.keys():
try:
return_dict[key] = (weight-1.0)/weight * old_dict[key] + \
1.0/weight * new_dict[key]
except TypeError: # for Strings (like parameters)
# they should not be different among epochs
if old_dict[key] == new_dict[key]:
return_dict[key] = old_dict[key]
return return_dict
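# Illustrative check (not original code): the weighting above is the standard
# running (iterative) mean, so averaging epoch by epoch matches averaging all
# epochs at once. Plain floats stand in for the metric dictionaries.
def _check_running_average():
    values = [0.8, 0.6, 0.7, 0.9]
    avg = values[0]
    for k, val in enumerate(values[1:], start=2):
        avg = (k - 1.0) / k * avg + 1.0 / k * val
    assert abs(avg - sum(values) / len(values)) < 1e-12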
def store_state(self, result_dir, index=None):
""" Stores additional information in the given directory *result_dir* """
if self.store:
node_dir = os.path.join(result_dir, self.__class__.__name__)
create_directory(node_dir)
super(SlidingWindowSinkNode,self).store_state(result_dir)
if self.save_score_plot or self.save_trial_plot or self.save_time_plot:
import matplotlib.pyplot as plt
data = [[pv.prediction for pv, _ in self.data_time[k]] for \
k in self.data_time.keys()]
if self.save_time_plot:
label_data = [numpy.array([(pv.label,
float(pv.tag.split(';')[1].split(':')[1][:-2])) \
for pv, _ in self.data_time[k]]) \
for k in self.data_time.keys()]
fig = plt.figure()
ax1 = plt.subplot(111)
ax1.yaxis.grid(True, linestyle='-', which='major', color='grey',
alpha=0.5)
ax1.xaxis.grid(True, linestyle='-', which='major', color='grey',
alpha=0.5)
for trial in label_data:
# one tick per sliding step, ending at the time-zero window
ind = numpy.arange(-1.0 * (len(trial) - 1) * self.sliding_step,
self.sliding_step, self.sliding_step)
x = [ind[i] for i, (label, start_time) in enumerate(trial) \
if label == self.ir_class]
y = [start_time] * len(x)
if x == []:
plt.plot(ind[0],start_time,'ro')
else:
plt.plot(x, y,'bo')
plt.xlabel("Time (ms)")
plt.ylabel("Start time of trial (s)")
name = 'trial-time_plot_sp%s.pdf' % self.current_split
plt.savefig(os.path.join(node_dir,name),dpi=None,facecolor='w',
edgecolor='w',orientation='portrait',papertype=None,
format=None,transparent=False)
plt.close(fig)
if self.save_score_plot:
max_time = max([len(trial) for trial in data])
data_across_time = [[] for time_step in range(max_time)]
for trial in data:
for i, elem in enumerate(trial[::-1]):
data_across_time[i].append(elem)
means = [numpy.mean(time_step) for time_step in data_across_time]
stds = [numpy.std(time_step) for time_step in data_across_time]
ind = numpy.arange(-1.0 * (max_time - 1) * self.sliding_step,
self.sliding_step, self.sliding_step)
from swan import pycwt
import numpy as np
import matplotlib.pyplot as plt
import wave
#import struct
#from scipy import fromstring, int16
#from pylab import *
#from scipy import signal
wavfile = 'hirakegoma.wav'
#wavfile = 'ohayo.wav'
wr = wave.open(wavfile, "rb")
ch = wr.getnchannels()
width = wr.getsampwidth()
fr = wr.getframerate()
fn = wr.getnframes()
fs = fn / fr  # total duration in seconds
print('ch', ch)
print('frame', fn)
print('fr',fr)
print('duration', fs, 'sec')
print('width', width)
origin = wr.readframes(wr.getnframes())
data = origin[:fn]  # note: keeps only the first fn bytes of the raw byte buffer
wr.close()
amp = max(data)  # max raw byte value (rough level check, not the true peak amplitude)
print(amp)
print('len of origin', len(origin))
print('len of sampling: ', len(data))
# assuming stereo input -> convert to monaural
y = np.frombuffer(data, dtype="int16")
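# Illustrative sketch (not original code, assumption-laden): if the recording
# really is stereo (ch == 2), the interleaved int16 samples can be averaged
# down to one channel; scaling by 2**15 maps int16 to [-1, 1).
if ch == 2:
    y = y.reshape(-1, 2).mean(axis=1)
y = y.astype(np.float64) / 32768.0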
# coding=utf-8
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTFeatureExtractor
class ImageGPTFeatureExtractionTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=18,
do_normalize=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
def prepare_feat_extract_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
]
),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
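# Illustrative sketch (not the feature extractor's actual implementation):
# ImageGPT-style color quantization assigns each normalized pixel to its
# nearest cluster by squared distance. `_nearest_cluster_indices` is a
# hypothetical helper that only illustrates the role of the `clusters`
# palette above.
def _nearest_cluster_indices(pixels, clusters):
    # pixels: (n, 3) array scaled to [-1, 1]; clusters: (k, 3) palette
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=1)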
@require_torch
@require_vision
class ImageGPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
feature_extraction_class = ImageGPTFeatureExtractor if is_vision_available() else None
def setUp(self):
self.feature_extract_tester = ImageGPTFeatureExtractionTester(self)
@property
def feat_extract_dict(self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def test_feat_extract_properties(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(feature_extractor, "clusters"))
self.assertTrue(hasattr(feature_extractor, "do_resize"))
self.assertTrue(hasattr(feature_extractor, "size"))
self.assertTrue(hasattr(feature_extractor, "do_normalize"))
def test_feat_extract_to_json_string(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
obj = json.loads(feat_extract.to_json_string())
for key, value in self.feat_extract_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, obj[key]))
else:
self.assertEqual(obj[key], value)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path).to_dict()
feat_extract_first = feat_extract_first.to_dict()
for key, value in feat_extract_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, feat_extract_second[key]))
else:
self.assertEqual(feat_extract_second[key], value)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
feat_extract_first.save_pretrained(tmpdirname)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname).to_dict()
feat_extract_first = feat_extract_first.to_dict()
for key, value in feat_extract_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, feat_extract_second[key]))
else:
self.assertEqual(feat_extract_second[key], value)
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute"
__license__ = "MIT"
"""Noise distributions for locations in the sim.
"""
__all__ = (
"RandomMiddleLocation",
"CenterLocation",
"RandomEdgeLocation",
"RandomUniformLocation",
"RandomBottomLocation",
"RandomRightLocation",
"RandomTopLocation",
"RandomTopRightLocation",
"RandomBottomRightLocation",
"RandomLeftLocation",
"RandomBottomLeftLocation",
"RandomTopLeftLocation",
)
import random
from typing import Tuple, Union
import numpy as np
from scipy.stats import uniform
from segar.factors import Noise, Position
def _get_boundaries(min_distance: float = 0.1) -> Tuple[float, float]:
"""Adds a margin to boundaries if distances are enforced.
"""
low, high = (-1.0, 1.0)
if min_distance:
low += min_distance / 2.0
high -= min_distance / 2.0
return low, high
# These methods are for sampling from specific locations around the arena.
class Position2D(Noise[np.ndarray]):
def __init__(self, lows: np.ndarray, highs: np.ndarray):
self.lows = lows
self.highs = highs
# scipy's uniform takes (loc, scale), so the scale is the interval width
self._dists = [uniform(lows[0], highs[0] - lows[0]), uniform(lows[1], highs[1] - lows[1])]
super().__init__(params=dict(lows=lows, highs=highs))
def cdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
cdfs = [dist.cdf(samples[i]) for i, dist in enumerate(self._dists)]
# multiply the per-dimension CDFs (consistent with the sum in log_cdf)
return np.prod(cdfs, axis=0)
def log_cdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
log_cdfs = [dist.logcdf(samples[i]) for i, dist in enumerate(self._dists)]
return sum(log_cdfs)
def pdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
pdfs = [dist.pdf(samples[i]) for i, dist in enumerate(self._dists)]
# multiply the per-dimension densities (consistent with the sum in log_pdf)
return np.prod(pdfs, axis=0)
def log_pdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
log_pdfs = [dist.logpdf(samples[i]) for i, dist in enumerate(self._dists)]
return sum(log_pdfs)
def sample(self) -> Position:
return Position(np.random.uniform(self.lows, self.highs))
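# Illustrative usage (not original code): sampling and scoring a uniform 2D
# position. Assumes Position wraps a length-2 array; the bounds are arbitrary.
def _position2d_demo():
    dist = Position2D(np.array([-1.0, -1.0]), np.array([1.0, 1.0]))
    pos = dist.sample()                    # a Position inside the box
    dens = dist.pdf(np.array([0.0, 0.0]))  # constant density 1/4 over the box
    return pos, dens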
class RandomMiddleLocation(Position2D):
def __init__(self):
low, high = _get_boundaries()
mid = (high + low) / 2.0
mid1 = (mid + low) / 2.0
mid2 = (high + mid) / 2.0
super().__init__(np.array([mid1, mid1]), np.array([mid2, mid2]))
class CenterLocation(Position2D):
def __init__(self):
low, high = _get_boundaries()
mid = (high + low) / 2.0
super().__init__(np.array([mid, mid]), np.array([mid, mid]))
class RandomEdgeLocation(Noise[np.ndarray]):
def __init__(self):
self.low, self.high = _get_boundaries()
mid = (self.high + self.low) / 2.0
self.q = (mid + self.low) / 2.0
super().__init__(params=dict(low=self.low, high=self.high, q=self.q))
def cdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
xs = samples[0]
ys = samples[1]
def _cdf(x):
if x < self.low:
return 0.0
elif self.low <= x < self.high - self.q:
return uniform(self.low, self.q).cdf(x)
elif self.high - self.q <= x:
return uniform(self.high - self.q, self.high).cdf(x)
else:
raise ValueError
xcdfs = np.array(list(map(_cdf, xs)))
ycdfs = np.array(list(map(_cdf, ys)))
return xcdfs * ycdfs
def log_cdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
return np.log(self.cdf(samples))
def pdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
xs = samples[0]
ys = samples[1]
def _pdf(x):
if x < self.low:
return 0.0
elif self.low <= x < self.high - self.q:
return uniform(self.low, self.q).pdf(x)
elif self.high - self.q <= x:
return uniform(self.high - self.q, self.high).pdf(x)
else:
raise ValueError
xpdfs = np.array(list(map(_pdf, xs)))
ypdfs = np.array(list(map(_pdf, ys)))
return xpdfs * ypdfs
def log_pdf(self, samples: np.ndarray) -> Union[np.ndarray, None]:
return np.log(self.pdf(samples))
def sample(self) -> Position:
# Bottom left "tile"
pos = np.random.uniform(self.low, self.q, (2,))
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
if spectype == 'gaus':
# note: assumes an energy-dispersion array `edis` is available in scope
spec = 1. / edis[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
if spectype == 'voig':
args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
if spectype == 'edis':
edis = edisintp(elin)[None, :]
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'pvoi':
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'lore':
spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
if spectype == 'powr':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
if spectype == 'colr':
if plot:
spec = np.zeros((gdat.numbenerplot, flux.size))
else:
spec = np.empty((gdat.numbener, flux.size))
for i in gdat.indxener:
if i < gdat.indxenerpivt:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
elif i == gdat.indxenerpivt:
spec[i, :] = flux
else:
spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
if spectype == 'curv':
spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
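# Illustrative sketch (not original code): the 'powr' branch above evaluates a
# plain power law, spec_i = flux * (E_i / E_pivot)**(-sind), per energy bin and
# source. All numbers below are arbitrary.
def _powr_spec_demo():
    ener = np.array([0.5, 1., 2., 5.])   # energy bin centers
    enerpivt = 1.                        # pivot energy
    flux = np.array([1e-9, 2e-9])        # flux of two sources at the pivot
    sind = np.array([2.0, 2.4])          # spectral indices
    return flux[None, :] * (ener / enerpivt)[:, None] ** (-sind[None, :])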
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
# jitter the drawn pixel center by roughly one pixel size (assumed intent)
lgal = gdat.lgalgrid[indxpixldraw] + gdat.sizepixl * np.random.randn()
bgal = gdat.bgalgrid[indxpixldraw] + gdat.sizepixl * np.random.randn()
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
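# Illustrative check (not original code): the closed form above is the Poisson
# log-PMF, log p(k | mu) = k log(mu) - mu - log(k!), so it should agree with
# scipy's poisson (imported above as pss).
def _check_lprbpois():
    data = np.array([0., 3., 7.])
    modl = np.array([1.5, 2.0, 6.0])
    assert np.allclose(retr_lprbpois(data, modl), pss.logpmf(data, modl))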
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
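# Illustrative check (not original code): the broken power law above is
# normalized, so a numerical integral over [minm, maxm] should be close to one.
def _check_dpow_norm(minm=1., maxm=100., brek=10., sloplowr=1.5, slopuppr=2.5):
    xdat = np.linspace(minm, maxm, 100000)
    totl = np.trapz(pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr), xdat)
    assert abs(totl - 1.) < 1e-3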
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'atan':
# use local names; do not clobber the model attributes with scalars
maxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, maxmparagenrscalbase)
elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
meanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
stdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
paragenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, meanparagenrscalbase, stdvparagenrscalbase, \
cdfnminmparagenrscalbaseunit, paragenrscalbaseunitdiff)
else:
paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, meanparagenrscalbase, stdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
if scalcomp == 'logt':
# assumption: the same linear scale factor applies as in the 'self' case
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
cdfn = np.empty_like(icdf)
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \
or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':
minm = getattr(gdat.fitt.minm, nameparagenrelem)
if gmod.listscalparagenrelem[k] == 'powr':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
else:
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
cdfn[k] = cdfn_self(icdf[k], minm, fact)
if gmod.listscalparagenrelem[k] == 'lnormeanstdv':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
if gmod.listscalparagenrelem[k] == 'igam':
slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
if gmod.listscalparagenrelem[k] == 'gaus':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
compunit = cdfn_expo(comp, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
compunit = cdfn_dpow(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
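# Illustrative sketch (not original code): expected counts are surface
# brightness times exposure and pixel solid angle, times the bin width for
# differential energy bins. Shapes follow (numbener, numbpixl, numbevtt);
# all numbers are arbitrary.
def _cntp_demo():
    sbrt = np.full((2, 4, 1), 1e-6)      # surface brightness
    expo = np.full((2, 4, 1), 1e11)      # exposure
    apix = 1e-6                          # pixel solid angle [sr]
    deltener = np.array([0.5, 1.5])      # energy bin widths
    return sbrt * expo * apix * deltener[:, None, None]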
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
## experiment specific
def retr_psfphubb(gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
# 0.30 / gdat.anglfacti\
# 0.40 / gdat.anglfacti\
# 0.60 / gdat.anglfacti\
# 0.70 / gdat.anglfacti
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
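# Illustrative note (not original code): the N_tail loop above applies the
# standard Fermi-LAT relation f_core = 1 / (1 + N_tail * s_tail**2 / s_core**2),
# which turns the tabulated tail amplitude into a core fraction in [0, 1].
def _fcore_from_ntail(ntail, score, stail):
    return 1. / (1. + ntail * stail**2 / score**2)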
def retr_refrchaninit(gdat):
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
setattr(gdat, 'plotminmotyp', 0.)
setattr(gdat, 'plotmaxmotyp', 1.)
setattr(gmod.lablrootpara, 'otyp', 'O')
setattr(gdat, 'scalotypplot', 'self')
setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
rasccand = line[2]
declcand = line[2]
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
numbtileside = numbsidecntr // gdat.numbsidecart  # integer division, so the tile indices below stay integral
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
hdun = astropy.io.fits.open(pathfile)
hdun.info()
lgalchan = hdun[1].data['_Glon'] / 180. * np.pi
bgalchan = hdun[1].data['_Glat'] / 180. * np.pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
gdat.refrspec = [np.zeros((3, gdat.numbener, lgalchan.size))]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
# Wolf et al. (2008)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
# the reference-catalog labels below are attached to the fitting model; gmod was otherwise undefined in this scope (assumed here)
gmod = gdat.fitt
gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
# gmod was undefined in this scope; attach to the fitting model (assumed)
gmod = gdat.fitt
gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
# five bands, matching the uncertainty columns stacked below (the original stacked only three)
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux100_300'], fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000'], \
fgl3['Flux10000_100000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * np.exp(-0.5 * scaldevi**2 / sigt**2)
return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
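# Minimal usage sketch for the PSF kernels above (illustrative values, assuming
# angles in radians and scalar hyperparameters; not taken from any actual run):
# angl = np.linspace(0., 0.1, 100)
# psfn = retr_doubking(angl, frac=0.9, sigc=0.01, gamc=2., sigt=0.05, gamt=1.5)
# All of the kernels carry the 1 / (2 pi sigma^2) prefactor (with the
# 1 - 1 / gamma correction for the King terms) so that each integrates to
# unity over the plane in the small-angle limit.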
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
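# retr_gang and retr_aang map the two sky offsets to a radial angle and a
# position angle; retr_lgalbgal inverts the pair in the small-angle (flat-sky)
# limit, where gang ~ sqrt(lgal**2 + bgal**2). For example, lgal = 3e-3 rad and
# bgal = 4e-3 rad give gang ~ 5e-3 rad and aang = arctan2(4., 3.).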
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
if False:
# amplitude-dependent proposal scale (disabled; incomplete sketch -- several names below are undefined in this scope)
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdp**2))  # the original differenced stdv with itself, which is identically zero; stdp assumed intended
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
# unit values get the CDF transform (the original assigned both lines to paragenrscalfull, the second overwriting the first)
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmlgal or np.fabs(gdatmodi.compseco[0]) > gdat.maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > gdat.maxmbgal or np.fabs(gdatmodi.compseco[1]) > gdat.maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
# (retr_probmerg reads gdatmodi.indxelemfullmodi[0]; seed it with the first element of the pair, since it is otherwise stale here)
gdatmodi.indxelemfullmodi = [gdatmodi.indxelemfullmergfrst]
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gmod.maxmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
gmodnext.paragenrunitfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
def calc_probprop(gdat, gdatmodi):
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
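# The returned structure is a list over populations; each entry is a dictionary
# mapping an element parameter name to the indices of that parameter across all
# elements of the population, plus the key 'full' collecting every element
# parameter index of the population in one flat array.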
def retr_weigmergodim(gdat, elin, elinothr):
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
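# Both merge weights are Gaussian kernels in the element separation with width
# gdat.radispmr, so elements closer than roughly one split/merge radius dominate
# the merge-partner probabilities computed in retr_probmerg() below.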
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
# use the a-th element of the modified pair (for 'pair', the two denominators below each need their own weights)
if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[a]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[a]], elintotl[gdatmodi.indxelemfullmodi[a]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[a]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[a]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[a]], lgaltotl[gdatmodi.indxelemfullmodi[a]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[a]], bgaltotl[gdatmodi.indxelemfullmodi[a]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
if typeelem[indxpopltran].startswith('lghtline'):
weigpair = retr_weigmergodim(gdat, elintotl[gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgaltotl[gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[1]], \
bgaltotl[gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is infinite.')
return probmerg
def retr_indxparaelem(gmod, l, u):
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
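# Sketch of the index arithmetic above (illustrative numbers): with
# indxparagenrfulleleminit = 10, numbparagenrelemcuml = [0, 50] and
# numbparagenrelemsing = [5, ...], element u = 2 of population l = 0 occupies
# indices 20 through 24 of the full parameter vector.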
def gang_detr():
# symbolic scratch utility; the a* and b* symbols were undefined and the function returned nothing in the original -- returning the determinant is an assumption based on the function name
gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
a1, a2, a3, b1, b2, b3 = sympy.symbols('a1 a2 a3 b1 b2 b3')
AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
return AB.det()
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
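# Unit vector on the sphere for the given longitude- and latitude-like angles;
# the sign on the y component mirrors the longitude axis, consistent with
# sky-map conventions in which longitude increases toward the left.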
def retr_psec(gdat, conv):
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
def retr_psecodim(gdat, psec):
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
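# cdfnminm is the Gaussian CDF at the lower bound and cdfndiff the probability
# mass enclosed between the bounds; together they normalize a Gaussian
# truncated to [minmvarb, maxmvarb]. For example, with bounds at
# meanvarb +/- stdvvarb, cdfndiff ~ 0.683.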
def retr_condcatl(gdat):
# the condensed catalog is built against the fitting model; gmod was otherwise undefined in this scope (assumed here)
gmod = gdat.fitt
# setup
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
print('indxstksparagenrscalfull')
print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
indxstks = np.where(dist < gdat.distthrs[k])[0]
if indxstks.size > 0:
for j in indxstks:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
indxstksleft = list(range(numbstks))  # needs to be a list, since elements are removed below
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
(listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')  # note: strgpdfn is not defined in this function; it is assumed to come from the enclosing scope
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
def retr_conv(gdat, defl):
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
def retr_invm(gdat, defl):
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
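# The returned quantity is the determinant of the lens-mapping Jacobian,
# i.e., the inverse magnification; critical curves are where it vanishes.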
def setp_indxswepsave(gdat):
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
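# Example of the bookkeeping above (illustrative numbers): with numbburn = 100,
# factthin = 10 and numbsamp = 5, the saved sweeps are 100, 110, 120, 130 and
# 140; indxsampsave is -1 everywhere except at those sweeps, where it runs from
# 0 to 4.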
def retr_cntspnts(gdat, listposi, spec):
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
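# This is the standard conversion from the square of the Einstein angle to the
# mass it encloses, M = pi * (thetaE * adishost)**2 * mdencrit, with adishost
# the angular-diameter distance to the deflector.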
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
def retr_mcutfrommscl(fracacutasca):
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
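# Dimensionless truncation correction as a function of f = acut / asca; it
# evaluates to (pi - 2) / 4 ~ 0.285 at f = 1 and grows only logarithmically
# for f >> 1.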
def retr_negalogt(varb):
negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
def retr_spatmean(gdat, inpt, boolcntp=False):
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
if absv:
dotstemp = np.fabs(dotstemp)
dots = np.mean(dotstemp)
return dots
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[tuple([slice(None)] + list(indxvarb))]
else:
varb = varb[indxvarb]
return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp -- the kernel-approximation error calculation is currently disabled;
# both branches force False until it is re-enabled
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
# foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
# these are set outside any population loop, hence popl='full'
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl='full', strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.5, 2.5]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
# color indices: explicit limits for the first two, a generic fallback for the rest
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
for i in gdat.indxenerinde:
if i > 2:
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-3., -0.5], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and all(isinstance(valutemp, float) for valutemp in gmod.typeback[c]):
# interpreted here as [normalization, spectral index]; note the original
# condition (simultaneously a list and a float) could never be satisfied
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c][0]]), sind=np.array([gmod.typeback[c][1]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
indxexpatemp = int(gmod.typeback[c][8:])
# ordrexpa is assumed to carry over from the template expansion in the 'init' pass
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
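# Decoding sketch (hypothetical values): with ordrexpa = 2, template index
# indxexpatemp = 5 gives indxterm = 5 // 4 = 1 (sin x cos) and wavenumbers
# indxexpaxdat = (5 % 4) // 2 + 1 = 1, indxexpaydat = (5 % 4) % 2 + 1 = 2,
# i.e. the template sin(2 pi lgal / L) * cos(2 pi 2 bgal / L) up to amplitude.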
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
raise Exception('Background templates must be non-negative everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# diffuse emission must be convolved and point sources exist
typeevalpsfn = 'full'
else:
# all backgrounds are uniform, so only the point-source kernel is needed
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
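# Summary of the four evaluation modes decided above:
#   'full' -- point sources present and diffuse emission to be convolved
#   'kern' -- point sources present, all backgrounds spatially uniform
#   'conv' -- no point sources, but diffuse emission to be convolved
#   'none' -- no point sources and uniform backgrounds; PSF never evaluated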
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot=r'$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot=r'$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
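# Layout sketch for the expected-PSF vector indexed in the boolpriopsfninfo
# branch above (hypothetical sizes): psfpexpr is flattened as
#     index = i * numbpsfpform + m * numbpsfpform * numbener + offset
# so with numbener = 3 and numbpsfpform = 5 ('doubking'), the core width of
# energy bin i=1, event type m=0 sits at index 5 and its core gamma at index 6.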
# background
## number of background parameters
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
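# Worked example (hypothetical configuration): with two backgrounds where the
# first is spectrally tied (boolspecback = [True, False]) and numbener = 3,
# the loops above yield indxbackbacp = [0, 1, 1, 1], indxenerbacp = [0, 0, 1, 2],
# and hence indxbacpback[0] = [0] and indxbacpback[1] = [1, 2, 3].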
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
# variables whose marginal distributions and pair-correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == 'lens' and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l] = 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.maxmpara.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
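# Counting sketch (hypothetical two-population model): with
# numbparagenrelemsing = [6, 4] and maxmpara.numbelem = [100, 10], the loop
# above gives numbparagenrelem = [600, 40], cumulative offsets
# numbparagenrelemcuml = [0, 600], numbparagenrelemcumr = [600, 640], and
# numbparagenrelemtotl = 640.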
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelemtotl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
# temp -- numbparaelem is briefly reused here as the number of populations;
# it is redefined below as a per-population array
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.maxmpara.numbelemtotl > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
# redshost and redssour (host and source redshifts) are assumed to be defined upstream
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.maxmpara.numbelemtotl > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
# temp -- the three double-power-law hyperparameters currently share one index
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
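# Naming sketch for the hyperparameter indices set above: a 'powr' prior on
# 'flux' in population 0 is stored as 'sloppriofluxpop0' (strghypr + 'prio' +
# parameter name + population tag). Note that for the joint scalings
# ('gausmeanstdv', 'lnormeanstdv') both branches match and strghypr ends up
# as 'stdv', so only the 'stdvprio...' attribute receives an index here.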
# group PSF parameters
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'psffen%02devt%d' % (i, m), cntr.incr())
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
# collect the indices of lensing-related base parameters into the groups
# initialized above; the index attributes on gmod.indxpara are iterated directly
for strg, valu in gmod.indxpara.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg.startswith(strgtemp):
getattr(gmod.indxpara, namecomplens).append(valu)
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'$f_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'$f_{\rho,\rm{sub}}$')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'flux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
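# Construction sketch (hypothetical selection): for nameenerscaltype='en02',
# nameenerunit='gevv' and namesoldunit='ster', the loop above stores
#     gmod.lablunitpara.sbrten02gevvsterunit = 'GeV cm$^{-2}$ s$^{-1}$ sr$^{-1}$'
# i.e. the unit string of E^2 times surface brightness in GeV.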
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], r'\Delta_{%d}' % q)
gdat.lablsigm = r'\sigma_l'
gdat.lablgamm = r'\gamma_l'
gdat.lablbcom = r'\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = r'\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = r'\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except AttributeError:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
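# Flattening sketch (hypothetical population): a generative parameter 'flux'
# in population 0 with maxmpara.numbelem[0] = 3 contributes the flat name
# 'fluxpop0' and the extended names 'fluxpop00000', 'fluxpop00001',
# 'fluxpop00002' via the '%04d' element suffix appended above.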
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
    for e in gmod.indxsersfgrd:
        # index string for this Sersic foreground component
        strgsersfgrd = 'isf%d' % e
        gmod.namepara.scal += ['masshost' + strgsersfgrd + 'bein']
        for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
            gmod.namepara.scal += ['masshost' + strgsersfgrd + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (q, l)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
    for k in gmod.indxparakind:
        name = gmod.namepara.kind[k]
        scal = getattr(gmod.scalpara, name)
        if scal == 'self' or scal == 'logt':
            minm = getattr(gmod.minmpara, name)
            maxm = getattr(gmod.maxmpara, name)
            if scal == 'self':
                fact = maxm - minm
            if scal == 'logt':
                fact = np.log(maxm / minm)
            if fact == 0:
                print('name')
                print(name)
                raise Exception('')
            setattr(gmod.factpara, name, fact)
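    # e.g., for a log-uniform ('logt') parameter with minm=1e-9 and maxm=1e-6 (hypothetical
    # values), fact = np.log(1e3); the inverse-CDF transform can then map a unit-interval
    # draw u to the parameter value minm * np.exp(u * fact)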
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
        if 'dist' in strg and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
    # assumed: these initial diagnostic plots use the fitting metamodel
    gmod = gdat.fitt
    if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convoltuion of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
                # log-log version of the fractional-difference plot; the file-name suffix is
                # assumed, to keep it from overwriting the linear-x version above
                path = gdat.pathinitintr + 'sersprofdifflogt%04d%04d.pdf' % (n, k)
                tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
                                                             scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
                                                             listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
    cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
    cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
    axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
    if back == 'full':
        gmod.indxbacktemp = gmod.indxback
    elif isinstance(back, int):
        gmod.indxbacktemp = np.array([back])
    # assumed: Sersic foreground components are indexed by gmod.indxsersfgrd, as elsewhere
    if isfr == 'full':
        indxisfrtemp = gmod.indxsersfgrd
    elif isfr != 'none':
        indxisfrtemp = [isfr]
liststrgvarb = []
if iele != 'none':
for l in gmod.indxpopl:
if iele == 'full':
                listiele = np.arange(gmod.maxmpara.numbelem[l])
else:
listiele = [iele]
for k in listiele:
liststrgvarb.append(strgvarb + 'pop%d%04d' % (l, k))
if popl != 'none' and ener == 'none' and evtt == 'none' and back == 'none' and iele == 'none':
for l in indxpopltemp:
liststrgvarb.append(strgvarb + 'pop%d' % l)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr != 'none':
for e in indxisfrtemp:
liststrgvarb.append(strgvarb + 'isf%d' % e)
if popl == 'none' and ener != 'none' and evtt != 'none' and back == 'none':
for i in indxenertemp:
for m in indxevtttemp:
liststrgvarb.append(strgvarb + 'en%02devt%d' % (i, m))
if popl == 'none' and ener != 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'back%04den%02d' % (c, i))
if popl == 'none' and ener == 'none' and evtt == 'none' and back != 'none':
for c in gmod.indxbacktemp:
liststrgvarb.append(strgvarb + 'back%04d' % c)
if popl == 'none' and ener != 'none' and evtt == 'none' and back == 'none':
for i in indxenertemp:
liststrgvarb.append(strgvarb + 'en%02d' % i)
if popl == 'none' and ener == 'none' and evtt == 'none' and back == 'none' and isfr == 'none':
liststrgvarb.append(strgvarb)
if gdat.booldiagmode:
for strgvarb in liststrgvarb:
if liststrgvarb.count(strgvarb) != 1:
print('liststrgvarb')
print(liststrgvarb)
print('popl')
print(popl)
print('ener')
print(ener)
print('evtt')
print(evtt)
print('back')
print(back)
print('isfr')
print(isfr)
print('iele')
print(iele)
raise Exception('')
return liststrgvarb
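# a minimal usage sketch (hypothetical arguments): expanding a base name over populations only,
#     setp_namevarbsing(gdat, gmod, 'fitt', 'minmflux', 'full', 'none', 'none', 'none', 'none', 'none')
# returns ['minmfluxpop0', 'minmfluxpop1', ...], one name per population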
def setp_varb(gdat, strgvarbbase, valu=None, minm=None, maxm=None, scal='self', lablroot=None, lablunit='', mean=None, stdv=None, cmap=None, numbbins=10, \
popl='none', ener='none', evtt='none', back='none', isfr='none', iele='none', \
boolinvr=False, \
strgmodl=None, strgstat=None, \
):
'''
Set up variable values across all models (true and fitting) as well as all populations, energy bins,
event bins, background components, and Sersic components
'''
# determine the list of models
if strgmodl is None:
if gdat.typedata == 'mock':
liststrgmodl = ['true', 'fitt', 'plot']
else:
liststrgmodl = ['fitt', 'plot']
else:
if strgmodl == 'true' or strgmodl == 'plot' or strgmodl == 'refr':
liststrgmodl = [strgmodl]
else:
liststrgmodl = ['fitt', 'plot']
print('liststrgmodl')
print(liststrgmodl)
for strgmodl in liststrgmodl:
if strgmodl == 'plot':
gmod = gdat.fitt
gmodoutp = gdat
else:
gmod = getattr(gdat, strgmodl)
gmodoutp = gmod
# get the list of names of the variable
liststrgvarbnone = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, 'none')
if iele != 'none':
liststrgvarb = setp_namevarbsing(gdat, gmod, strgmodl, strgvarbbase, popl, ener, evtt, back, isfr, iele)
else:
liststrgvarb = liststrgvarbnone
# set the values of each variable in the list
for strgvarb in liststrgvarb:
if minm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.minmpara, strgvarb, minm)
if maxm is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.maxmpara, strgvarb, maxm)
if mean is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.meanpara, strgvarb, mean)
            if stdv is not None:
                setp_varbcore(gdat, strgmodl, gmodoutp.stdvpara, strgvarb, stdv)
if valu is not None:
if strgstat is None:
print('strgvarb')
print(strgvarb)
print('strgmodl')
print(strgmodl)
print('valu')
print(valu)
print('')
setp_varbcore(gdat, strgmodl, gmodoutp, strgvarb, valu)
elif strgstat == 'this':
setp_varbcore(gdat, strgmodl, gmodoutp.this, strgvarb, valu)
if scal is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.scalpara, strgvarb, scal)
if lablroot is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablrootpara, strgvarb, lablroot)
if lablunit is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.lablunitpara, strgvarb, lablunit)
if cmap is not None:
setp_varbcore(gdat, strgmodl, gmodoutp.cmappara, strgvarb, cmap)
setp_varbcore(gdat, strgmodl, gmodoutp.numbbinspara, strgvarb, numbbins)
# create limt, bins, mean, and delt
            if (minm is not None and maxm is not None) or (mean is not None and stdv is not None):
# determine minima and maxima for Gaussian or log-Gaussian distributed parameters
if mean is not None:
minm = mean - gdat.numbstdvgaus * stdv
maxm = mean + gdat.numbstdvgaus * stdv
# uniformly-distributed
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsunif = np.linspace(minm, maxm, numbbins + 1)
                if scal == 'logt' or scal == 'powr':
                    if gdat.booldiagmode:
                        if minm <= 0.:
                            raise Exception('')
                    binsunif = np.linspace(np.log10(minm), np.log10(maxm), numbbins + 1)
if scal == 'asnh':
binsunif = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numbbins + 1)
if boolinvr:
binsunif = binsunif[::-1]
meanparaunif = (binsunif[1:] + binsunif[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanpara = meanparaunif
bins = binsunif
minmunif = minm
maxmunif = maxm
if scal == 'logt' or scal == 'powr':
meanpara = 10**meanparaunif
bins = 10**binsunif
minmunif = np.log10(minm)
maxmunif = np.log10(maxm)
if scal == 'asnh':
meanpara = np.sinh(meanparaunif)
bins = np.sinh(binsunif)
minmunif = np.arcsinh(minm)
maxmunif = np.arcsinh(maxm)
delt = np.diff(bins)
limt = np.array([minm, maxm])
                # tick values and labels are constructed only for the nonlinear scalings; 'self' is not yet implemented
if scal == 'asnh' or scal == 'logt' or scal == 'powr':
listvalutickmajr, listlabltickmajr, listvalutickminr, listlabltickminr = tdpy.retr_valulabltick(minm, maxm, scal)
setattr(gmodoutp.labltickmajrpara, strgvarb, listlabltickmajr)
setattr(gmodoutp.valutickmajrpara, strgvarb, listvalutickmajr)
setattr(gmodoutp.labltickminrpara, strgvarb, listlabltickminr)
setattr(gmodoutp.valutickminrpara, strgvarb, listvalutickminr)
#labltick = np.empty(gdat.numbtickcbar, dtype=object)
#for k in range(gdat.numbtickcbar):
# if scal == 'asnh':
# valutick[k] = np.sinh(tickunif[k])
# if scal == 'logt' or scal == 'powr':
# valutick[k] = 10**(tickunif[k])
# # avoid very small, but nonzero central values in the residual count color maps
# if strgcbar == 'cntpresi' and np.fabs(valutick[k]) < 1e-5:
# valutick[k] = 0.
# if strgcbar == 'cntpdata' and np.amax(valutick) > 1e3:
# labltick[k] = '%d' % valutick[k]
# else:
# labltick[k] = '%.3g' % valutick[k]
setattr(gmodoutp.limtpara, strgvarb, limt)
setattr(gmodoutp.binspara, strgvarb, bins)
setattr(gmodoutp.meanpara, strgvarb, meanpara)
setattr(gmodoutp.deltpara, strgvarb, delt)
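# a minimal usage sketch (hypothetical values): defining a log-uniform flux for all populations,
#     setp_varb(gdat, 'flux', minm=1e-9, maxm=1e-6, scal='logt', lablroot='$f$', popl='full')
# sets minmpara.fluxpopX, maxmpara.fluxpopX, scalpara.fluxpopX, etc. and builds the
# corresponding limt, bins, mean, and delt arrays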
def retr_ticklabltemp(gdat, strgcbar):
minm = getattr(gdat.minmpara, strgcbar)
maxm = getattr(gdat.maxmpara, strgcbar)
scal = getattr(gdat.scalpara, strgcbar)
numb = gdat.numbtickcbar - 1
retr_axis(gdat, strgcbar, numb=numb)
minmscal = minm
if scal == 'asnh':
minmscal = np.arcsinh(minmscal)
if scal == 'logt':
minmscal = np.log10(minmscal)
maxmscal = maxm
if scal == 'asnh':
maxmscal = np.arcsinh(maxmscal)
if scal == 'logt':
maxmscal = np.log10(maxmscal)
tickscal = np.linspace(minmscal, maxmscal, gdat.numbtickcbar)
labl = np.empty(gdat.numbtickcbar, dtype=object)
tick = np.copy(tickscal)
for k in range(gdat.numbtickcbar):
if scal == 'asnh':
tick[k] = np.sinh(tickscal[k])
elif scal == 'logt':
tick[k] = 10**(tickscal[k])
# avoid very small, but nonzero central values in the residual count color maps
if strgcbar == 'cntpresi' and np.fabs(tick[k]) < 1e-5:
tick[k] = 0.
if strgcbar == 'cntpdata' and np.amax(tick) > 1e3:
labl[k] = '%d' % tick[k]
else:
labl[k] = '%.3g' % tick[k]
    setattr(gdat.tickpara, strgcbar, tick)
    # store the tick labels computed above as well; the attribute name below is an assumption
    setattr(gdat.labltickpara, strgcbar, labl)
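# e.g., for an 'asnh' color bar with minm=-100 and maxm=100 (hypothetical values), the ticks
# are placed uniformly in arcsinh space and mapped back with np.sinh, concentrating tick
# marks near zero while still covering the tails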
def retr_axistemp(gdat, strgvarb, strgmodl=None, boolinvr=False):
if strgmodl is None:
listgdattemp = [gdat]
for strgmodl in gdat.liststrgmodl:
listgdattemp.append(getattr(gdat, strgmodl))
elif strgmodl == 'fitt' or strgmodl == 'true':
listgdattemp = [getattr(gdat, strgmodl)]
    elif strgmodl == 'allm':
        listgdattemp = []
        for strgmodl in gdat.liststrgmodl:
            listgdattemp.append(getattr(gdat, strgmodl))
for gdattemp in listgdattemp:
minm = getattr(gdattemp.minmpara, strgvarb)
maxm = getattr(gdattemp.maxmpara, strgvarb)
numb = getattr(gdattemp.numbbinspara, strgvarb)
scal = getattr(gdattemp.scalpara, strgvarb)
if scal == 'self' or scal == 'pois' or scal == 'gaus':
binsscal = np.linspace(minm, maxm, numb + 1)
        if scal == 'logt':
            if gdat.booldiagmode:
                if minm <= 0.:
                    raise Exception('')
            binsscal = np.linspace(np.log10(minm), np.log10(maxm), numb + 1)
if scal == 'asnh':
binsscal = np.linspace(np.arcsinh(minm), np.arcsinh(maxm), numb + 1)
if boolinvr:
binsscal = binsscal[::-1]
meanvarbscal = (binsscal[1:] + binsscal[:-1]) / 2.
if scal == 'self' or scal == 'pois' or scal == 'gaus':
meanvarb = meanvarbscal
bins = binsscal
if scal == 'logt':
meanvarb = 10**meanvarbscal
bins = 10**binsscal
if scal == 'asnh':
meanvarb = np.sinh(meanvarbscal)
bins = np.sinh(binsscal)
delt = np.diff(bins)
limt = np.array([np.amin(bins), np.amax(bins)])
setattr(gdattemp.limtpara, strgvarb, limt)
setattr(gdattemp.binspara, strgvarb, bins)
setattr(gdattemp.meanpara, strgvarb, meanvarb)
setattr(gdattemp.deltpara, strgvarb, delt)
def setp_varbcore(gdat, strgmodl, gdattemp, strgvarbtemp, valu):
    # respect a value already defined by the user (and not None); otherwise set the default
    valutemp = getattr(gdattemp, strgvarbtemp, None)
    if valutemp is None:
        setattr(gdattemp, strgvarbtemp, valu)
    elif gdat.typeverb > 0:
        print('Received custom value for %s, %s: %s' % (strgvarbtemp, strgmodl, valutemp))
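# intp_sinc evaluates the PSF template at a sub-pixel offset (lgal, bgal) via a sinc-kernel
# weighted sum over the PSF grid, i.e., band-limited interpolation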
def intp_sinc(gdat, lgal, bgal):
intpsinc = 4. * gdat.numbsidepsfn**2 * np.sum(gdat.temppsfn * sinc(gdat.numbsidepsfn * (gdat.gridpsfnlgal + lgal) - gdat.gridpsfnlgal) * \
sinc(gdat.numbsidepsfn * (gdat.gridpsfnbgal + bgal) - gdat.gridpsfnbgal))
return intpsinc
def retr_fluxbrgt(gdat, lgal, bgal, flux):
    if lgal.size == 0:
        fluxbrgt = np.array([0.])
        fluxbrgtassc = np.array([0.])
    else:
        indxbrgt = np.argmax(flux)
        fluxbrgt = flux[indxbrgt]
        # the association logic is not implemented here; default the associated flux to the
        # brightest flux so that the return value is always defined
        fluxbrgtassc = fluxbrgt
    return fluxbrgt, fluxbrgtassc
def init_figr(gdat, gdatmodi, strgpdfn, strgplot, strgstat, strgmodl, indxenerplot, indxevttplot, indxpoplplot):
figrsize = (gdat.sizeimag, gdat.sizeimag)
figr, axis = plt.subplots(figsize=figrsize)
nameplot = strgplot
if gdat.numbener > 1:
nameplot += 'en%02d' % gdat.indxenerincl[indxenerplot]
    if gdat.numbevtt > 1:
if indxevttplot == -1:
nameplot += 'evtA'
else:
nameplot += 'evt%d' % gdat.indxevttincl[indxevttplot]
if gdat.fitt.numbpopl > 1:
if indxpoplplot == -1:
nameplot += 'popA'
else:
nameplot += 'pop%d' % indxpoplplot
path = retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, nameplot)
axis.set_xlabel(gdat.fitt.labltotlpara.lgalpop0)
axis.set_ylabel(gdat.fitt.labltotlpara.bgalpop0)
titl = ''
if indxenerplot is not None and gdat.numbener > 1 and strgplot.endswith('cnts'):
titl = gdat.strgener[indxenerplot]
if indxevttplot is not None and gdat.numbevtt > 1 and strgplot.endswith('cnts'):
titl += ' ' + gdat.strgevtt[indxevttplot]
axis.set_title(titl)
return figr, axis, path
def draw_frambndr(gdat, axis):
outr = max(gdat.frambndrmodl, gdat.frambndrdata)
axis.set_xlim([-outr, outr])
axis.set_ylim([-outr, outr])
innr = min(gdat.frambndrmodl, gdat.frambndrdata)
axis.axvline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axvline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(innr, ls='--', alpha=gdat.alphbndr, color='black')
axis.axhline(-innr, ls='--', alpha=gdat.alphbndr, color='black')
def retr_imag(gdat, axis, maps, strgstat, strgmodl, strgcbar, indxenerplot=None, indxevttplot=-1, booltdim=False, imag=None):
draw_frambndr(gdat, axis)
# take the relevant energy and PSF bins
if indxenerplot is not None:
if indxevttplot == -1:
maps = np.sum(maps[indxenerplot, ...], axis=1)
else:
maps = maps[indxenerplot, :, indxevttplot]
# project the map to 2D
if gdat.typepixl == 'heal':
maps = tdpy.retr_cart(maps, indxpixlrofi=gdat.indxpixlrofi, numbsideinpt=gdat.numbsideheal, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata)
if gdat.typepixl == 'cart':
shap = [gdat.numbsidecart] + list(maps.shape)
shap[1] = gdat.numbsidecart
shapflat = list(maps.shape)
shapflat[0] = gdat.numbpixlfull
mapstemp = np.zeros(shapflat)
if maps.size == gdat.indxpixlrofi.size:
mapstemp[gdat.indxpixlrofi, ...] = maps
else:
mapstemp[:, ...] = maps
maps = mapstemp.reshape(shap).swapaxes(0, 1)
# temp -- this is needed to bring the Fermi-LAT map to the right direction
#maps = fliplr(maps)
# rescale the map
if strgmodl is not None:
gmod = getattr(gdat, strgmodl)
else:
gmod = gdat
scal = getattr(gdat.scalpara, strgcbar)
cmap = getattr(gdat.cmappara, strgcbar)
vmin = getattr(gdat.minmpara, strgcbar)
vmax = getattr(gdat.maxmpara, strgcbar)
if scal == 'asnh':
maps = np.arcsinh(maps)
if scal == 'logt':
maps = np.log10(maps)
if imag is None:
imag = axis.imshow(maps, cmap=cmap, origin='lower', extent=gdat.exttrofi, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=gdat.alphmaps)
return imag
else:
imag.set_data(maps)
def make_cbar(gdat, axis, imag, strgvarb):
# make a color bar
valutickmajr = getattr(gdat.valutickmajrpara, strgvarb)
labltickmajr = getattr(gdat.labltickmajrpara, strgvarb)
cbar = plt.colorbar(imag, ax=axis, fraction=0.05, aspect=15)
cbar.set_ticks(valutickmajr)
cbar.set_ticklabels(labltickmajr)
return cbar
def make_legdmaps(gdat, strgstat, strgmodl, axis, mosa=False, assc=False):
gmod = getattr(gdat, strgmodl)
# transdimensional elements
    if strgmodl == 'fitt' and ((strgstat == 'pdfn' and gdat.boolcondcatl) or strgstat == 'this') and gmod.numbparaelem > 0:
for l in gmod.indxpopl:
colr = retr_colr(gdat, strgstat, strgmodl, l)
if strgstat == 'pdfn':
labl = 'Condensed %s %s' % (gmod.legd, gmod.legdpopl[l])
else:
labl = 'Sample %s %s' % (gmod.legd, gmod.legdpopl[l])
if not gmod.maxmpara.numbelem[l] == 0:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=labl, marker=gmod.listelemmrkr[l], lw=gdat.mrkrlinewdth, color=colr)
for q in gdat.indxrefr:
if not np.amax(gdat.refr.numbelem[q]) == 0:
if assc:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, \
label=gdat.refr.lablhits[q], marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
else:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label=gdat.refr.lablelem[q], marker=gdat.refr.listmrkrmiss[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
# fixed-dimensional objects
if strgmodl == 'fitt':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gmod.lablmodl, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gmod.lablmodl, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gdat.typedata == 'mock':
if gmod.boollens:
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Source' % gdat.refr.labl, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.typeemishost != 'none':
axis.scatter(gdat.anglfact * gdat.maxmgangdata * 5., gdat.anglfact * gdat.maxmgangdata * 5, s=50, alpha=gdat.alphelem, facecolor='none', \
label='%s Host' % gdat.refr.labl, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
temphand, temp = axis.get_legend_handles_labels()
numblabl = len(temp)
if numblabl == 4:
numbcols = 2
else:
numbcols = 3
if mosa:
axis.legend(bbox_to_anchor=[1., 1.15], loc='center', ncol=numbcols)
else:
axis.legend(bbox_to_anchor=[0.5, 1.15], loc='center', ncol=numbcols)
def supr_fram(gdat, gdatmodi, strgstat, strgmodl, axis, indxpoplplot=-1, assc=False):
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
# associations with the reference elements
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] > 0:
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
reframpl = gdat.refr.dictelem[q][gdat.refr.nameparagenrelemampl[q]][0, :]
mrkrsize = retr_mrkrsize(gdat, strgmodl, reframpl, gdat.refr.nameparagenrelemampl[q])
lgal = np.copy(gdat.refr.dictelem[q]['lgal'][0, :])
bgal = np.copy(gdat.refr.dictelem[q]['bgal'][0, :])
numbelem = int(gdat.refr.numbelem[q])
if gdatmodi is not None and gmod.numbparaelem > 0 and assc:
### hit
indx = gdatmodi.this.indxelemrefrasschits[q][l]
if indx.size > 0:
                        axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, label=gdat.refr.lablhits[q], \
                                                    marker=gdat.refr.listmrkrhits[q], lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
### missed
indx = gdatmodi.this.indxelemrefrasscmiss[q][l]
else:
indx = np.arange(lgal.size)
if indx.size > 0:
                    axis.scatter(gdat.anglfact * lgal[indx], gdat.anglfact * bgal[indx], s=mrkrsize[indx], alpha=gdat.alphelem, facecolor='none', \
                                                label=gdat.refr.lablmiss[q], marker=gdat.refr.listmrkrmiss[q], \
                                                lw=gdat.mrkrlinewdth, color=gdat.refr.colrelem[q])
sizexoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
sizeyoff = gdat.maxmgangdata * 0.05 * gdat.anglfact
if 'etag' in gdat.refr.namepara.elem[q]:
for k in range(indx.size):
axis.text(gdat.anglfact * lgal[indx[k]] + sizexoff, gdat.anglfact * bgal[indx[k]] + sizeyoff, gdat.refretag[q][indx[k]], \
verticalalignment='center', horizontalalignment='center', \
color='red', fontsize=1)
# temp -- generalize this to input refrlgalhost vs.
if gdat.typedata == 'mock':
## host galaxy position
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
lgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost, gdat.anglfact * bgalhost, facecolor='none', alpha=0.7, \
label='%s Host %d' % (gdat.refr.labl, e), s=300, marker='D', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
if gmod.boollens:
## host galaxy Einstein radius
for e in gmod.indxsersfgrd:
truelgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
truebgalhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
truebeinhost = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * truelgalhost, \
gdat.anglfact * truebgalhost), \
gdat.anglfact * truebeinhost, \
edgecolor=gdat.refr.colr, facecolor='none', lw=gdat.mrkrlinewdth))
if gmod.boollens:
## source galaxy position
axis.scatter(gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.lgalsour], \
gdat.anglfact * gmodstat.paragenrscalfull[gmod.indxpara.bgalsour], \
facecolor='none', \
alpha=0.7, \
#alpha=gdat.alphelem, \
label='%s Source' % gdat.refr.labl, s=300, marker='>', lw=gdat.mrkrlinewdth, color=gdat.refr.colr)
# model catalog
if indxpoplplot == -1:
listindxpoplplot = gmod.indxpopl
else:
listindxpoplplot = [indxpoplplot]
for l in listindxpoplplot:
if gdatmodi is not None:
if gmod.numbparaelem > 0:
colr = retr_colr(gdat, strgstat, strgmodl, l)
                mrkrsize = retr_mrkrsize(gdat, strgmodl, gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l][gmod.nameparagenrelemampl[l]]], gmod.nameparagenrelemampl[l])
if 'lgal' in gdatmodi.this.indxparagenrfullelem:
lgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['lgal']]
bgal = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['bgal']]
else:
gang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['gang']]
aang = gdatmodi.this.paragenrscalfull[gdatmodi.this.indxparagenrfullelem[l]['aang']]
lgal, bgal = retr_lgalbgal(gang, aang)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, alpha=gdat.alphelem, label='Sample', marker=gmod.listelemmrkr[l], \
lw=gdat.mrkrlinewdth, color=colr)
## source
if gmod.boollens:
lgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gdatmodi.this.paragenrscalfull[gmod.indxpara.bgalsour]
axis.scatter(gdat.anglfact * lgalsour, gdat.anglfact * bgalsour, facecolor='none', \
alpha=gdat.alphelem, \
label='%s Source' % gmod.lablpara, s=300, marker='<', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.typeemishost != 'none':
## host
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % (e))]
bgalhost[e] = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % (e))]
axis.scatter(gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e], facecolor='none', \
alpha=gdat.alphelem, \
label='%s Host' % gmod.lablpara, s=300, marker='s', lw=gdat.mrkrlinewdth, color=gmod.colr)
if gmod.boollens:
beinhost = gdatmodi.this.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % (e))]
axis.add_patch(plt.Circle((gdat.anglfact * lgalhost[e], gdat.anglfact * bgalhost[e]), \
gdat.anglfact * beinhost, edgecolor=gmod.colr, facecolor='none', \
lw=gdat.mrkrlinewdth, ls='--'))
# temp
if strgstat == 'pdfn' and gdat.boolcondcatl and gmod.numbparaelem > 0:
lgal = np.zeros(gdat.numbprvlhigh)
bgal = np.zeros(gdat.numbprvlhigh)
ampl = np.zeros(gdat.numbprvlhigh)
cntr = 0
for r in gdat.indxstkscond:
if r in gdat.indxprvlhigh:
lgal[cntr] = gdat.dictglob['poststkscond'][r]['lgal'][0]
bgal[cntr] = gdat.dictglob['poststkscond'][r]['bgal'][0]
# temp -- this does not allow sources with different spectra to be assigned to the same stacked sample
ampl[cntr] = gdat.dictglob['poststkscond'][r][gmod.nameparagenrelemampl[l]][0]
cntr += 1
mrkrsize = retr_mrkrsize(gdat, strgmodl, ampl, gmod.nameparagenrelemampl[l])
colr = retr_colr(gdat, strgstat, strgmodl, l)
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
label='Condensed', marker=gmod.listelemmrkr[l], color='black', lw=gdat.mrkrlinewdth)
for r in gdat.indxstkscond:
lgal = np.array([gdat.dictglob['liststkscond'][r]['lgal']])
bgal = np.array([gdat.dictglob['liststkscond'][r]['bgal']])
axis.scatter(gdat.anglfact * lgal, gdat.anglfact * bgal, s=mrkrsize, \
marker=gmod.listelemmrkr[l], color='black', alpha=0.1, lw=gdat.mrkrlinewdth)
def retr_colr(gdat, strgstat, strgmodl, indxpopl=None):
if strgmodl == 'true':
if indxpopl is None:
colr = gdat.refr.colr
else:
colr = gdat.refr.colrelem[indxpopl]
    if strgmodl == 'fitt':
        gmod = gdat.fitt
if strgstat == 'this' or strgstat == 'pdfn':
if indxpopl is None:
colr = gmod.colr
else:
colr = gmod.colrelem[indxpopl]
if strgstat == 'mlik':
colr = 'r'
return colr
def retr_levipost(listllik):
    # harmonic-mean estimator of the log-evidence, log Z = -log < 1/L >_posterior, stabilized
    # by factoring out the minimum log-likelihood
    minmlistllik = np.amin(listllik)
    levipost = minmlistllik - np.log(np.mean(1. / np.exp(listllik - minmlistllik)))
    return levipost
def retr_infofromlevi(pmeallik, levi):
info = pmeallik - levi
return info
def retr_jcbn():
fluxpare, lgalpare, bgalpare, fluxauxi, lgalauxi, bgalauxi = sympy.symbols('fluxpare lgalpare bgalpare fluxauxi lgalauxi bgalauxi')
matr = sympy.Matrix([[ fluxpare, fluxauxi, 0, 0, 0, 0], \
[-fluxpare, 1 - fluxauxi, 0, 0, 0, 0], \
[-lgalauxi, 0, 1, 1 - fluxauxi, 0, 0], \
[-lgalauxi, 0, 1, -fluxauxi, 0, 0], \
[-bgalauxi, 0, 0, 0, 1, 1 - fluxauxi], \
[-bgalauxi, 0, 0, 0, 1, -fluxauxi]])
jcbn = matr.det()
return jcbn
# f1 = uf f0
# f2 = (1 - uf) f0
# x1 = x0 + (1 - uf) ux
# x2 = x0 - uf ux
# y1 = y0 + (1 - uf) uy
# y2 = y0 - uf uy
# f1/uf f1/f0 f1/x0 f1/ux f1/y0 f1/uy
# f2/uf f2/f0 f2/x0 f2/ux f2/y0 f2/uy
# x1/uf x1/f0 x1/x0 x1/ux x1/y0 x1/uy
# x2/uf x2/f0 x2/x0 x2/ux x2/y0 x2/uy
# y1/uf y1/f0 y1/x0 y1/ux y1/y0 y1/uy
# y2/uf y2/f0 y2/x0 y2/ux y2/y0 y2/uy
# f0 uf 0 0 0 0
# -f0 1 - uf 0 0 0 0
# -ux 0 1 1 - uf 0 0
# -ux 0 1 -uf 0 0
# -uy 0 0 0 1 1 - uf
# -uy 0 0 0 1 -uf
# determinant: f0 (up to sign), i.e., the parent flux, so the log-Jacobian of the split move is log(fluxpare)
#retr_jcbn()
def retr_angldist(gdat, lgalfrst, bgalfrst, lgalseco, bgalseco):
# temp -- heal does not work when the dimension of lgalfrst is 1
if gdat.typepixl == 'heal':
dir1 = np.array([lgalfrst, bgalfrst])
dir2 = np.array([lgalseco, bgalseco])
angldist = hp.rotator.angdist(dir1, dir2)
else:
angldist = np.sqrt((lgalfrst - lgalseco)**2 + (bgalfrst - bgalseco)**2)
return angldist
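# retr_deflextr computes the deflection due to an external shear of magnitude sher and
# position angle sang: defl = (g1 * x + g2 * y, g2 * x - g1 * y) with g1 = sher * cos(2 * sang)
# and g2 = sher * sin(2 * sang)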
def retr_deflextr(gdat, indxpixlelem, sher, sang):
factcosi = sher * np.cos(2. * sang)
    factsine = sher * np.sin(2. * sang)
defllgal = factcosi * gdat.lgalgrid[indxpixlelem] + factsine * gdat.bgalgrid[indxpixlelem]
deflbgal = factsine * gdat.lgalgrid[indxpixlelem] - factcosi * gdat.bgalgrid[indxpixlelem]
return np.vstack((defllgal, deflbgal)).T
def readfile(path):
print('Reading %s...' % path)
filepick = open(path + '.p', 'rb')
filearry = h5py.File(path + '.h5', 'r')
gdattemptemp = pickle.load(filepick)
for attr in filearry:
setattr(gdattemptemp, attr, filearry[attr][()])
filepick.close()
filearry.close()
if 'gdatfinl' in path or 'gdatinit' in path:
if hasattr(gdattemptemp, 'edis') and gdattemptemp.edis is not None and hasattr(gdattemptemp, 'binsener'):
gdattemptemp.edisintp = sp.interpolate.interp1d(gdattemptemp.binsener, gdattemptemp.edis, fill_value='extrapolate')
gdattemptemp.adisobjt = sp.interpolate.interp1d(gdattemptemp.redsintp, gdattemptemp.adisintp, fill_value='extrapolate')
gdattemptemp.redsfromdlosobjt = sp.interpolate.interp1d(gdattemptemp.adisintp * gdattemptemp.redsintp, \
gdattemptemp.redsintp, fill_value='extrapolate')
return gdattemptemp
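# a minimal usage sketch (hypothetical path): writfile and readfile round-trip a global data
# object, with numpy arrays stored in '<path>.h5' and everything else pickled in '<path>.p',
#     gdat = readfile('/path/to/gdatfinl')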
def init_stat(gdat, gdatmodi=None):
    # construct the initial state; gdatmodi is optional (assumed signature) and only needed by
    # the verbose display and element-initialization helpers below
if gdat.typeverb > 0:
print('Initializing the sampler state...')
print('inittype')
print(gdat.inittype)
gmod = gdat.fitt
## initialization
### initialize the unit sample vector randomly
gmod.this.paragenrunitfull = np.random.rand(gmod.numbparagenrfull)
gmod.this.paragenrscalfull = np.empty(gmod.numbparagenrfull)
## impose user-specified initial state
### number of elements
## create dummy indxparagenrfullelem
gmod.this.indxparagenrfullelem = None
if gmod.numbparaelem > 0:
if gdat.inittype == 'refr':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gmod.paragenrunitfull[gmod.indxpara.numbelem[l]]
else:
for l in gmod.indxpopl:
if gmod.typemodltran == 'pois':
meanelemtemp = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, \
gmod.this.indxparagenrfullelem)[gmod.indxpara.meanelem[l]]
print('temp -- user input is not working for numbelem')
#namevarb = 'numbelempop%d' % l
#initvalu = getattr(gmod.init, namevarb)
#if initvalu > gmod.maxmpara.numbelem[l] or initvalu < gmod.minmpara.numbelem[l]:
# raise Exception('Bad initial number of elements...')
#gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = initvalu
if gmod.typemodltran == 'pois':
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = np.random.poisson(meanelemtemp)
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = round(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
min(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.maxmpara.numbelem[l])
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = \
max(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]], gmod.minmpara.numbelem[l])
                    gmod.this.paragenrscalfull[gmod.indxpara.numbelem[l]] = gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]]
if gdat.booldiagmode:
if gdat.typedata == 'mock' and gdat.inittype == 'refr':
for l in gmod.indxpopl:
if gmod.paragenrunitfull[gmod.indxpara.numbelem[l]] > gmod.maxmpara.numbelem[l]:
raise Exception('')
if gmod.numbparaelem > 0:
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.inittype == 'reco':
if gdat.namerecostat is not None:
strgcnfg = gdat.namerecostat
else:
strgcnfg = gdat.strgcnfg
path = gdat.pathoutp + 'stat_' + strgcnfg + '.h5'
if os.path.exists(path):
boolinitreco = True
thisfile = h5py.File(path, 'r')
if gdat.typeverb > 0:
print('Initializing from the state %s...' % path)
print('Likelihood:')
print(thisfile['lliktotl'][...])
            # find the number of populations provided
            maxmindxpopl = 0
            for attr in thisfile:
                if attr.startswith('lgalpop'):
                    indxpopltemp = int(attr[7])
                    if indxpopltemp > maxmindxpopl:
                        maxmindxpopl = indxpopltemp
            numbpoplinpt = maxmindxpopl + 1
if numbpoplinpt != gmod.numbpopl:
print('State file and fitting metamodel have different number of populations.')
            # find the number of elements provided
            cntr = np.zeros(numbpoplinpt, dtype=int)
            for attr in thisfile:
                if attr.startswith('lgalpop'):
                    indxpopltemp = int(attr[7])
                    cntr[indxpopltemp] += 1
if gdat.typeverb > 0:
print('Number of elements found:')
print(cntr)
            for attr in thisfile:
                for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
                    if nameparagenrbase == attr:
                        if nameparagenrbase.startswith('numbelem'):
                            try:
                                indxpopltemp = int(nameparagenrbase[-1])
                                initnumbelem = getattr(gdat, 'initnumbelempop%d' % indxpopltemp)
                                print('Initial condition for the number of elements conflicts with the state file. Defaulting to the argument...')
                            except:
                                initnumbelem = thisfile[attr][()]
                            gmod.this.paragenrunitfull[k] = initnumbelem
                        else:
                            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', thisfile[attr][()], k)
                            if gmod.this.paragenrunitfull[k] == 0.:
                                print('Warning: CDF is zero.')
                        if not np.isfinite(thisfile[attr][()]):
                            raise Exception('Retrieved state parameter is not finite.')
                        if (gmod.numbparaelem == 0 or (gmod.numbparaelem > 0 and not k in gmod.indxpara.numbelem)) and \
                                        (not np.isfinite(gmod.this.paragenrunitfull[k]) or gmod.this.paragenrunitfull[k] < 0. or \
                                        gmod.this.paragenrunitfull[k] > 1.):
                            raise Exception('CDF of the retrieved state parameter is bad.')
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
                    maxmnumbelem = gmod.maxmpara.numbelem[l]
                    if gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] > maxmnumbelem:
                        gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = maxmnumbelem
if gdat.typeverb > 0:
print('Tapering off the element list...')
gmod.this.indxelemfull = []
for l in gmod.indxpopl:
gmod.this.indxelemfull.append(list(range(gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]].astype(int))))
if gdat.typeverb > 0:
print('gmod.this.paragenrunitfull[gmod.indxpara.numbelem]')
print(gmod.this.paragenrunitfull[gmod.indxpara.numbelem])
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if (gmod.this.paragenrunitfull == 0).all():
raise Exception('Bad initialization.')
if gmod.numbparaelem > 0 and gmod.this.indxparagenrfullelem is not None:
            for nameparagenrelem in gmod.namepara.elemflat:
initcomp = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
initcomp[l] = np.empty(len(gmod.this.indxelemfull[l]))
for k in range(len(gmod.this.indxelemfull[l])):
namefiel = '%spop%d%04d' % (nameparagenrelem, l, k)
for attr in thisfile:
if namefiel == attr:
initcomp[l][k] = thisfile[namefiel][()]
setattr(gdat, 'init' + nameparagenrelem, initcomp)
initcompfromstat(gdat, gdatmodi, 'init')
thisfile.close()
else:
boolinitreco = False
if gdat.typeverb > 0:
print('Could not find the state file, %s, to initialize the sampler.' % path)
if gdat.inittype == 'refr':
if gdat.typedata == 'inpt':
for l in gmod.indxpopl:
gmod.this.paragenrunitfull[gmod.indxpara.numbelem[l]] = gdat.refr.numbelem[l]
        if gdat.typedata == 'mock':
            # assumed: base parameters shared with the true metamodel are initialized at their
            # true values
            for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
                if not (gdat.inittype == 'pert' and nameparagenrbase.startswith('numbelem')) and \
                                                            nameparagenrbase in gdat.true.nameparagenrbase:
                    indxparatrue = np.where(gdat.true.nameparagenrbase == nameparagenrbase)[0]
                    gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.true.this.paragenrscalfull[indxparatrue], k)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
if gdat.typeverb > 1:
show_paragenrscalfull(gdat, gdatmodi)
if gmod.this.indxparagenrfullelem is not None:
print('Initializing elements from the reference element parameters...')
show_paragenrscalfull(gdat, gdatmodi)
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
show_paragenrscalfull(gdat, gdatmodi)
initcompfromstat(gdat, gdatmodi, 'refr')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
    ## impose user-specified individual initial values
    for k, nameparagenrbase in enumerate(gmod.nameparagenrbase):
        if nameparagenrbase.startswith('numbelem'):
            continue
        if gdat.inittype == 'reco' or gdat.inittype == 'refr' or gdat.inittype == 'pert':
            if hasattr(gdat, 'init' + nameparagenrbase):
                print('Conflicting initial state arguments detected, init keyword takes precedence.')
        try:
            initvalu = getattr(gdat, 'init' + nameparagenrbase)
            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', initvalu, k)
            if gdat.typeverb > 0:
                print('Received initial condition for %s: %.3g' % (nameparagenrbase, initvalu))
        except:
            pass
## PSF
if gdat.initpsfp is not None:
print('Initializing the metamodel PSF from the provided initial state...')
if gdat.initpsfp.size != gmod.indxpara.psfp.size:
raise Exception('')
        for k in gmod.indxpara.psfp:
            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gdat.initpsfp[k-gmod.indxpara.psfp[0]], k)
if gdat.initpsfprefr:
print('Initializing the metamodel PSF from the reference state...')
        for k in gmod.indxpara.psfp:
            gmod.this.paragenrunitfull[k] = cdfn_paragenrscalbase(gdat.fitt, '', gmod.psfpexpr[k-gmod.indxpara.psfp[0]], k)
    if gdat.inittype == 'rand' or (gdat.inittype == 'reco' and not boolinitreco):
if gdat.typeverb > 0:
print('Initializing from a random state...')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
if gmod.numbparaelem > 0:
gmod.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmod.this.indxelemfull, 'fitt')
# check the initial unit sample vector for bad entries
if gmod.numbparaelem > 0:
indxsampdiff = np.setdiff1d(gmod.indxparagenrfull, gmod.indxpara.numbelem)
if np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])).any():
raise Exception('')
indxsampbaddlowr = np.where((gmod.this.paragenrunitfull[indxsampdiff] <= 0.) | np.logical_not(np.isfinite(gmod.this.paragenrunitfull[indxsampdiff])))[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull[indxsampdiff] >= 1.)[0]
indxsampbaddlowr = indxsampdiff[indxsampbaddlowr]
indxsampbadduppr = indxsampdiff[indxsampbadduppr]
else:
indxsampbaddlowr = np.where(gmod.this.paragenrunitfull <= 0.)[0]
indxsampbadduppr = np.where(gmod.this.paragenrunitfull >= 1.)[0]
indxsampbadd = np.concatenate((indxsampbaddlowr, indxsampbadduppr))
if indxsampbadd.size > 0:
print('Initial value caused unit sample vector to go outside the unit interval...')
show_paragenrscalfull(gdat, gdatmodi, indxsampshow=indxsampbadd)
gmod.this.paragenrunitfull[indxsampbadd] = np.random.rand(indxsampbadd.size)
raise Exception('')
gmod.this.paragenrscalfull = icdf_paragenrscalfull(gdat, 'fitt', gmod.this.paragenrunitfull, gmod.this.indxparagenrfullelem)
indxbadd = np.where(np.logical_not(np.isfinite(gmod.this.paragenrscalfull)))[0]
if indxbadd.size > 0:
raise Exception('')
def writfile(gdattemp, path):
filepick = open(path + '.p', 'wb')
filearry = h5py.File(path + '.h5', 'w')
gdattemptemp = tdpy.gdatstrt()
for attr, valu in gdattemp.__dict__.items():
if attr.endswith('psfnintp'):
continue
if isinstance(valu, np.ndarray) and valu.dtype != np.dtype('O') and valu.dtype != np.dtype('<U4'):# or isinstance(valu, str) or \
#isinstance(valu, float) or isinstance(valu, bool) or isinstance(valu, int) or isinstance(valu, np.float):
filearry.create_dataset(attr, data=valu)
else:
# temp -- make sure interpolation objects are not written.
if attr != 'adisobjt' and attr != 'redsfromdlosobjt' and attr != 'edisintp':
setattr(gdattemptemp, attr, valu)
print('Writing to %s...' % path)
pickle.dump(gdattemptemp, filepick, protocol=pickle.HIGHEST_PROTOCOL)
filepick.close()
filearry.close()
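# note: the interpolation objects (adisobjt, redsfromdlosobjt, edisintp) are deliberately not
# written; readfile reconstructs them from the stored arrays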
def retr_deflcutf(angl, defs, asca, acut, asym=False):
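    # deflection profile of a truncated NFW subhalo: defs sets the normalization, asca the
    # scale radius, and acut the truncation radius; with asym=True the untruncated NFW form
    # is used instead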
fracanglasca = angl / asca
deflcutf = defs / fracanglasca
# second term in the NFW deflection profile
fact = np.ones_like(fracanglasca)
indxlowr = np.where(fracanglasca < 1.)[0]
indxuppr = np.where(fracanglasca > 1.)[0]
fact[indxlowr] = np.arccosh(1. / fracanglasca[indxlowr]) / np.sqrt(1. - fracanglasca[indxlowr]**2)
fact[indxuppr] = np.arccos(1. / fracanglasca[indxuppr]) / np.sqrt(fracanglasca[indxuppr]**2 - 1.)
if asym:
deflcutf *= np.log(fracanglasca / 2.) + fact
else:
fracacutasca = acut / asca
factcutf = fracacutasca**2 / (fracacutasca**2 + 1)**2 * ((fracacutasca**2 + 1. + 2. * (fracanglasca**2 - 1.)) * fact + \
np.pi * fracacutasca + (fracacutasca**2 - 1.) * np.log(fracacutasca) + np.sqrt(fracanglasca**2 + fracacutasca**2) * (-np.pi + (fracacutasca**2 - 1.) / fracacutasca * \
np.log(fracanglasca / (np.sqrt(fracanglasca**2 + fracacutasca**2) + fracacutasca))))
deflcutf *= factcutf
return deflcutf
def initchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime())
def stopchro(gdat, gdatmodi, name):
if gdatmodi is not None:
setattr(gdatmodi.this, 'chro' + name, gdat.functime() - getattr(gdatmodi.this, 'chro' + name))
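# retr_defl computes the deflection field on the requested pixels: when acut is provided,
# angllens is interpreted as the normalization of a truncated NFW profile (see retr_deflcutf);
# otherwise angllens is the Einstein radius of an elliptical isothermal profile, whose
# analytic deflection is evaluated in the rotated frame and rotated back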
def retr_defl(gdat, indxpixlelem, lgal, bgal, angllens, ellp=None, angl=None, rcor=None, asca=None, acut=None):
# translate the grid
lgaltran = gdat.lgalgrid[indxpixlelem] - lgal
bgaltran = gdat.bgalgrid[indxpixlelem] - bgal
if acut is not None:
defs = angllens
angl = np.sqrt(lgaltran**2 + bgaltran**2)
defl = retr_deflcutf(angl, defs, asca, acut)
defllgal = lgaltran / angl * defl
deflbgal = bgaltran / angl * defl
else:
bein = angllens
# rotate the grid
lgalrttr = np.cos(angl) * lgaltran - np.sin(angl) * bgaltran
bgalrttr = np.sin(angl) * lgaltran + np.cos(angl) * bgaltran
axisrati = 1. - ellp
facteccc = np.sqrt(1. - axisrati**2)
factrcor = np.sqrt(axisrati**2 * lgalrttr**2 + bgalrttr**2)
defllgalrttr = bein * axisrati / facteccc * np.arctan(facteccc * lgalrttr / factrcor)
deflbgalrttr = bein * axisrati / facteccc * np.arctanh(facteccc * bgalrttr / factrcor)
        # rotate the deflection vector back to the original basis
defllgal = np.cos(angl) * defllgalrttr + np.sin(angl) * deflbgalrttr
deflbgal = -np.sin(angl) * defllgalrttr + np.cos(angl) * deflbgalrttr
defl = np.vstack((defllgal, deflbgal)).T
return defl
def retr_lpriselfdist(gdat, strgmodl, feat, strgfeat):
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
lpri = np.sum(np.log(pdfn_self(feat, minm, maxm)))
return lpri
def retr_lprilogtdist(gdat, strgmodl, feat, strgfeat):
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
lpri = np.sum(np.log(pdfn_logt(feat, minm, maxm)))
return lpri
def retr_lpripowrdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
gmod = getattr(gdat, strgmodl)
minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + strgfeat + 'pop%d' % l)]
lpri = np.sum(np.log(pdfn_powr(feat, minm, maxm, slop)))
return lpri
def retr_lpridpowdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    gmod = getattr(gdat, strgmodl)
    minm = getattr(gmod.minmpara, strgfeat)
maxm = getattr(gmod.maxmpara, strgfeat)
brek = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distbrek')[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + strgfeat)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + strgfeat)[l]]
lpri = np.sum(np.log(pdfn_dpow(feat, minm, maxm, brek, sloplowr, slopuppr)))
return lpri
def retr_lprigausdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    gmod = getattr(gdat, strgmodl)
    distmean = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'diststdv')[l]]
lpri = np.sum(np.log(pdfn_gaus(feat, distmean, diststdv)))
return lpri
def retr_lpriigamdist(gdat, strgmodl, feat, strgfeat, paragenrscalfull, l):
    gmod = getattr(gdat, strgmodl)
    slop = paragenrscalfull[getattr(gmod.indxpara, strgfeat + 'slop')[l]]
cutf = getattr(gmod, 'cutf' + strgfeat)
lpri = np.sum(np.log(pdfn_igam(feat, slop, cutf)))
return lpri
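# traptdim integrates a two-dimensional array over the image grid with the composite
# trapezoidal rule: corner, edge, and interior samples carry weights 1, 2, and 4, scaled by
# the pixel solid angle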
def traptdim(gdat, arry):
s1 = arry[0, 0] + arry[-1, 0] + arry[0, -1] + arry[-1, -1]
s2 = np.sum(arry[1:-1, 0]) + np.sum(arry[1:-1, -1]) + np.sum(arry[0, 1:-1]) + np.sum(arry[-1, 1:-1])
s3 = np.sum(arry[1:-1, 1:-1])
summ = (s1 + 2*s2 + 4*s3) * gdat.apix
return summ
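# retr_spatprio normalizes the (optionally offset) spatial prior template to unit integral
# using traptdim and returns the log-prior map together with a bivariate spline for fast
# interpolation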
def retr_spatprio(gdat, pdfnspatpriotemp, spatdistcons=None):
pdfnspatprio = pdfnspatpriotemp
if spatdistcons is not None:
pdfnspatprio += spatdistcons
summ = traptdim(gdat, pdfnspatprio)
pdfnspatprio /= summ
lpdfspatprio = np.log(pdfnspatprio)
lpdfspatprioobjt = sp.interpolate.RectBivariateSpline(gdat.binspara.bgalcart, gdat.binspara.lgalcart, lpdfspatprio)
return lpdfspatprio, lpdfspatprioobjt
def retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=False):
if strgmodl == 'true':
gdatobjt = gdat.true
elif strgmodl == 'fitt' and boolinit:
gdatobjt = gdat.fitt
else:
gdatobjt = gdatmodi
return gdatobjt
def proc_samp(gdat, gdatmodi, strgstat, strgmodl, fast=False, boolinit=False):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl, boolinit=boolinit)
gmodstat = getattr(gdatobjt, strgstat)
initchro(gdat, gdatmodi, 'pars')
# grab the sample vector
indxpara = np.arange(gmodstat.paragenrscalfull.size)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.paragenrscalfull).all():
raise Exception('Sample vector is not finite.')
if gmod.typeevalpsfn != 'none' and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
psfp = gmodstat.paragenrscalfull[gmod.indxpara.psfp]
if gdat.booldiagmode:
if np.where(psfp == 0)[0].size == psfp.size:
raise Exception('All PSF parameters are zero.')
setattr(gmodstat, 'psfp', psfp)
bacp = gmodstat.paragenrscalfull[gmod.indxpara.bacp]
if gmod.numbparaelem > 0:
# temp -- this may slow down execution
gmodstat.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodstat.indxelemfull, strgmodl)
gmodstat.numbelem = np.empty(gmod.numbpopl, dtype=int)
indxelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.numbelem[l] = gmodstat.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int)
indxelem[l] = np.arange(gmodstat.numbelem[l])
gmodstat.numbelem[l] = np.sum(gmodstat.numbelem[l])
gmodstat.numbelemtotl = np.sum(gmodstat.numbelem)
gmodstat.dictelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmodstat.dictelem[l] = dict()
for strgfeat in gmod.namepara.genrelemdefa:
gmodstat.dictelem[l][strgfeat] = []
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem] = gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]
if gdat.booldiagmode:
if ((abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) < 1e-100 ) & (abs(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l][nameparagenrelem]]) > 0.)).any():
raise Exception('Element parameter value has underflowed.')
if gmodstat.numbelem[l] != len(gmodstat.dictelem[l][nameparagenrelem]):
print('l')
print(l)
print('numbelem')
print(gmodstat.numbelem[l])
print('gmodstat.dictelem')
print(gmodstat.dictelem)
print('nameparagenrelem')
print(nameparagenrelem)
raise Exception('Number of elements is inconsistent with the parameter vector.')
if gdat.boolbinsener:
if gdat.typeverb > 2:
print('Calculating element spectra...')
initchro(gdat, gdatmodi, 'spec')
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], \
edisintp=gdat.edisintp, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'spec')
if gdat.typeverb > 2:
print('Element features:')
for l in gmod.indxpopl:
print('l')
print(l)
for strgfeat in gmod.namepara.genrelem[l]:
print(strgfeat)
print(gmodstat.dictelem[l][strgfeat])
if gdat.booldiagmode:
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if (gmod.listscalparagenrelem[l][g] != 'gaus' and not gmod.listscalparagenrelem[l][g].startswith('lnor')) and \
(gmod.listscalparagenrelem[l][g] != 'expo' and (gmodstat.dictelem[l][nameparagenrelem] < getattr(gmod.minmpara, nameparagenrelem)).any()) or \
(gmodstat.dictelem[l][nameparagenrelem] > getattr(gmod.maxmpara, nameparagenrelem)).any():
print('l, g')
print(l, g)
print('nameparagenrelem')
print(nameparagenrelem)
print('gmodstat.dictelem[l][nameparagenrelem]')
summgene(gmodstat.dictelem[l][nameparagenrelem])
print('getattr(gmod, minm + nameparagenrelem)')
print(getattr(gmod.minmpara, nameparagenrelem))
print('getattr(gmod, maxm + nameparagenrelem)')
print(getattr(gmod.maxmpara, nameparagenrelem))
print('gmod.listscalparagenrelem[l][g]')
print(gmod.listscalparagenrelem[l][g])
raise Exception('')
# calculate element spectra
# temp
if gdat.booldiagmode:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['asca']] < 0.)[0]
if indx.size > 0:
raise Exception('Scale radius (asca) is negative.')
if gdat.variacut:
indx = np.where(gmodstat.paragenrscalfull[gmodstat.indxparagenrfullelem[l]['acut']] < 0.)[0]
if indx.size > 0:
raise Exception('Cutoff radius (acut) is negative.')
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
# evaluate horizontal and vertical position for elements whose position is a power law in image-centric radius
if gmod.typespatdist[l] == 'glc3':
gmodstat.dictelem[l]['dlos'], gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'] = retr_glc3(gmodstat.dictelem[l]['dglc'], \
gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'])
if gmod.typespatdist[l] == 'gangexpo':
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], = retr_lgalbgal(gmodstat.dictelem[l]['gang'], \
gmodstat.dictelem[l]['aang'])
if gdat.booldiagmode:
if gmodstat.numbelem[l] > 0:
if np.amin(gmodstat.dictelem[l]['lgal']) < gmod.minmlgal or \
np.amax(gmodstat.dictelem[l]['lgal']) > gmod.maxmlgal or \
np.amin(gmodstat.dictelem[l]['bgal']) < gmod.minmbgal or \
np.amax(gmodstat.dictelem[l]['bgal']) > gmod.maxmbgal:
raise Exception('Bad coordinates!')
if gmod.typespatdist[l] == 'los3':
gmodstat.dictelem[l]['dglc'], gmodstat.dictelem[l]['thet'], gmodstat.dictelem[l]['phii'] = retr_los3(gmodstat.dictelem[l]['dlos'], \
gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
# evaluate flux for pulsars
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['lumi'] = retr_lumipuls(gmodstat.dictelem[l]['geff'], gmodstat.dictelem[l]['magf'], gmodstat.dictelem[l]['per0'])
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
gmodstat.dictelem[l]['lumi'] = gmodstat.dictelem[l]['lum0'] * (1. + gmodstat.dictelem[l]['reds'])**4
if gmod.typeelem[l] == 'lghtpntspuls' or gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['flux'] = retr_flux(gdat, gmodstat.dictelem[l]['lumi'], gmodstat.dictelem[l]['dlos'])
# evaluate spectra
if gmod.typeelem[l].startswith('lghtline'):
if gmod.typeelem[l] == 'lghtlinevoig':
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], sigm=gmodstat.dictelem[l]['sigm'], \
gamm=gmodstat.dictelem[l]['gamm'], spectype=gmod.spectype[l])
else:
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], elin=gmodstat.dictelem[l]['elin'], edisintp=gdat.edisintp, spectype=gmod.spectype[l])
else:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['spec'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], curv=gmodstat.dictelem[l]['curv'], \
expc=gmodstat.dictelem[l]['expc'], sindcolr=sindcolr, spectype=gmod.spectype[l])
stopchro(gdat, gdatmodi, 'pars')
### loglikelihood
initchro(gdat, gdatmodi, 'modl')
if gmod.boollens:
lgalsour = gmodstat.paragenrscalfull[gmod.indxpara.lgalsour]
bgalsour = gmodstat.paragenrscalfull[gmod.indxpara.bgalsour]
if gdat.typeverb > 2:
print('Evaluating the likelihood...')
# process a sample vector and the occupancy list to calculate secondary variables
if gmod.boollens:
fluxsour = gmodstat.paragenrscalfull[gmod.indxpara.fluxsour]
if gdat.numbener > 1:
sindsour = gmodstat.paragenrscalfull[gmod.indxpara.sindsour]
sizesour = gmodstat.paragenrscalfull[gmod.indxpara.sizesour]
ellpsour = gmodstat.paragenrscalfull[gmod.indxpara.ellpsour]
anglsour = gmodstat.paragenrscalfull[gmod.indxpara.anglsour]
if gmod.typeemishost != 'none':
lgalhost = [[] for e in gmod.indxsersfgrd]
bgalhost = [[] for e in gmod.indxsersfgrd]
fluxhost = [[] for e in gmod.indxsersfgrd]
if gdat.numbener > 1:
sindhost = [[] for e in gmod.indxsersfgrd]
sizehost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
lgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'lgalhostisf%d' % e)]
bgalhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'bgalhostisf%d' % e)]
fluxhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'fluxhostisf%d' % e)]
if gdat.numbener > 1:
sindhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sindhostisf%d' % e)]
sizehost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sizehostisf%d' % e)]
if gmod.boollens:
beinhost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
beinhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'beinhostisf%d' % e)]
if gmod.typeemishost != 'none':
ellphost = [[] for e in gmod.indxsersfgrd]
anglhost = [[] for e in gmod.indxsersfgrd]
serihost = [[] for e in gmod.indxsersfgrd]
for e in gmod.indxsersfgrd:
ellphost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'ellphostisf%d' % e)]
anglhost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'anglhostisf%d' % e)]
serihost[e] = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'serihostisf%d' % e)]
if gmod.boollens:
numbpixltemp = gdat.numbpixlcart
defl = np.zeros((numbpixltemp, 2))
# determine the indices of the pixels over which element kernels will be evaluated
if gdat.boolbinsspat:
if gmod.numbparaelem > 0:
listindxpixlelem = [[] for l in gmod.indxpopl]
listindxpixlelemconc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
listindxpixlelem[l], listindxpixlelemconc[l] = retr_indxpixlelemconc(gdat, strgmodl, gmodstat.dictelem, l)
if gmod.boollens:
sherextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sherextr')]
sangextr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sangextr')]
## host halo deflection
initchro(gdat, gdatmodi, 'deflhost')
deflhost = [[] for e in gmod.indxsersfgrd]
indxpixlmiss = gdat.indxpixlcart
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the deflection field due to host galaxy %d' % e)
print('lgalhost[e]')
print(lgalhost[e])
print('bgalhost[e]')
print(bgalhost[e])
print('beinhost[e]')
print(beinhost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
deflhost[e] = retr_defl(gdat, indxpixlmiss, lgalhost[e], bgalhost[e], beinhost[e], ellp=ellphost[e], angl=anglhost[e])
if gdat.booldiagmode:
indxpixltemp = slice(None)
setattr(gmodstat, 'deflhostisf%d' % e, deflhost[e])
if gdat.typeverb > 2:
print('deflhost[e]')
summgene(deflhost[e])
defl += deflhost[e]
if gdat.typeverb > 2:
print('After adding the host deflection...')
print('defl')
summgene(defl)
if gdat.booldiagmode:
if not np.isfinite(deflhost).all():
raise Exception('Host deflection field is not finite.')
stopchro(gdat, gdatmodi, 'deflhost')
## external shear
initchro(gdat, gdatmodi, 'deflextr')
deflextr = []
indxpixltemp = gdat.indxpixlcart
deflextr = retr_deflextr(gdat, indxpixltemp, sherextr, sangextr)
defl += deflextr
if gdat.typeverb > 2:
print('After adding the external deflection...')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'deflextr')
# Boolean flag to indicate that the object to convolve the image will be needed
boolneedpsfnconv = gdat.typepixl == 'cart' and (gmod.typeevalpsfn == 'conv' or gmod.typeevalpsfn == 'full')
## Boolean flag to indicate that the object to convolve the image will be constructed
boolcalcpsfnconv = strgmodl == 'true' or boolinit or gdat.boolmodipsfn
# get the convolution object
if boolneedpsfnconv and boolcalcpsfnconv:
initchro(gdat, gdatmodi, 'psfnconv')
if gdat.typeverb > 2:
print('Evaluating the PSF convolution kernel...')
psfnconv = [[[] for i in gdat.indxener] for m in gdat.indxevtt]
if gdat.typepixl == 'cart':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
for mm, m in enumerate(gdat.indxevtt):
for ii, i in enumerate(gdat.indxener):
if gmod.typemodlpsfn == 'singgaus':
sigm = psfp[i+m*gdat.numbener]
else:
sigm = fwhm[i, m] / 2.355
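# convert the FWHM to a Gaussian standard deviation (FWHM = 2 sqrt(2 ln 2) sigma ~ 2.355 sigma)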
psfnconv[mm][ii] = AiryDisk2DKernel(sigm / gdat.sizepixl)
stopchro(gdat, gdatmodi, 'psfnconv')
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and gmod.numbparaelem > 0:
if strgmodl == 'true' or boolinit or gdat.boolmodipsfn:
if gdat.typepixl == 'heal':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gdat.typepixl == 'cart':
if gdat.kernevaltype == 'ulip':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
gmodstat.psfnintp = sp.interpolate.interp1d(gdat.binspara.angl, gmodstat.psfn, axis=1, fill_value='extrapolate')
if gdat.booldiagmode:
if not np.isfinite(gmodstat.psfnintp(0.05)).all():
raise Exception('Interpolated PSF is not finite.')
if gdat.kernevaltype == 'bspx':
gmodstat.psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.anglcart.flatten(), gmod.typemodlpsfn, strgmodl)
# side length of the upsampled kernel
gdat.numbsidekernusam = 100
# side length of the original kernel
gdat.numbsidekern = gdat.numbsidekernusam // factkernusam
gdat.indxsidekern = np.arange(gdat.numbsidekern)
# pad by one row and one column
#psf = np.zeros((gdat.numbsidekernusam+1, gdat.numbsidekernusam+1))
#psf[0:gdat.numbsidekernusam, 0:gdat.numbsidekernusam] = psf0
# make design matrix for each factkernusam x factkernusam region
nx = factkernusam + 1
y, x = np.mgrid[0:nx, 0:nx] / float(factkernusam)
x = x.flatten()
y = y.flatten()
kernmatrdesi = np.array([np.full(nx*nx, 1), x, y, x*x, x*y, y*y, x*x*x, x*x*y, x*y*y, y*y*y]).T
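# design matrix of all monomials of total degree <= 3 in the subpixel offsets (x, y)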
# output np.array of coefficients
gmodstat.psfnintp = np.empty((gdat.numbsidekern, gdat.numbsidekern, kernmatrdesi.shape[1]))
# solve p = kernmatrdesi psfnintp for psfnintp
for iy in gdat.indxsidekern:
for ix in gdat.indxsidekern:
p = psf[iy*factkernusam:(iy+1)*factkernusam+1, ix*factkernusam:(ix+1)*factkernusam+1].flatten()
gmodstat.psfnintp[iy, ix, :] = np.dot(np.linalg.inv(np.dot(kernmatrdesi.T, kernmatrdesi)), np.dot(kernmatrdesi.T, p))
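# ordinary least squares via the normal equations: coef = (A^T A)^-1 A^T p, with A = kernmatrdesi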
else:
gmodstat.psfnintp = gdat.fitt.this.psfnintp
sbrt = dict()
for name in gmod.listnamediff:
sbrt[name] = []
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = []
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = []
if gmod.boolelemdeflsubhanyy:
deflsubh = []
# retrieve or initialize state variable
if gmod.boolelemsbrtdfncanyy:
sbrtdfnc = np.zeros_like(gdat.expo)
if gmod.boolelemdeflsubhanyy:
deflsubh = np.zeros((gdat.numbpixl, 2))
if gmod.boolelemsbrtextsbgrdanyy:
sbrtextsbgrd = np.zeros_like(gdat.expo)
# element kernel evaluation
if gmod.boolelemsbrtdfncanyy:
initchro(gdat, gdatmodi, 'elemsbrtdfnc')
sbrt['dfnc'] = []
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
for k in range(gmodstat.numbelem[l]):
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
if gmod.typeelem[l] == 'clusvari':
sbrtdfnc[0, listindxpixlelem[l][k], 0] += gmodstat.dictelem[l]['nobj'][k] / 2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[listindxpixlelem[l][k]])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[listindxpixlelem[l][k]])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
if gmod.boolelempsfn[l]:
print('sbrtdfnc')
summgene(sbrtdfnc)
sbrtdfnc[:, listindxpixlelem[l][k], :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
if gmod.typeelem[l].startswith('lghtline'):
sbrtdfnc[:, 0, 0] += gmodstat.dictelem[l]['spec'][:, k]
sbrt['dfnc'] = sbrtdfnc
if gdat.booldiagmode:
if not np.isfinite(sbrtdfnc).all():
raise Exception('Element delta function brightness not finite.')
setattr(gmodstat, 'sbrtdfnc', sbrt['dfnc'])
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['dfnc'])
numbelemtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtdfnc[l]:
numbelemtemp += np.sum(gmodstat.numbelem[l])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtdfnc')
if gmod.boolelemdeflsubhanyy:
initchro(gdat, gdatmodi, 'elemdeflsubh')
if gdat.typeverb > 2:
print('Perturbing subhalo deflection field')
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
for kk, k in enumerate(indxelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
if gmod.typeelemspateval[l] == 'locl':
indxpixl = listindxpixlelem[l][kk]
else:
indxpixl = gdat.indxpixl
deflsubh[indxpixl, :] += retr_defl(gdat, indxpixl, \
gmodstat.dictelem[l]['lgal'][kk], gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['defs'][kk], \
asca=asca, acut=acut)
# temp -- find out what is causing the features in the element convergence maps
#for kk, k in enumerate(indxelem[l]):
# indxpixlpnts = retr_indxpixl(gdat, gmodstat.dictelem[l]['bgal'][kk], gmodstat.dictelem[l]['lgal'][kk])
# if deflsubh[listindxpixlelem[l][kk], :]
if gdat.typeverb > 2:
print('deflsubh')
summgene(deflsubh)
setattr(gmodstat, 'deflsubh', deflsubh)
if gdat.booldiagmode:
if not np.isfinite(deflsubh).all():
raise Exception('Element deflection is not finite.')
defl += deflsubh
if gdat.typeverb > 2:
print('After adding subhalo deflection to the total deflection')
print('defl')
summgene(defl)
stopchro(gdat, gdatmodi, 'elemdeflsubh')
if gmod.boolelemsbrtextsbgrdanyy:
initchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if strgstat == 'this':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtgausbgrd':
for k in range(gmodstat.numbelem[l]):
sbrtextsbgrd[:, listindxpixlelem[l][k], :] += gmodstat.dictelem[l]['spec'][:, k, None, None] / \
2. / np.pi / gmodstat.dictelem[l]['gwdt'][k]**2 * \
np.exp(-0.5 * ((gmodstat.dictelem[l]['lgal'][k] - gdat.lgalgrid[None, listindxpixlelem[l][k], None])**2 + \
(gmodstat.dictelem[l]['bgal'][k] - gdat.bgalgrid[None, listindxpixlelem[l][k], None])**2) / gmodstat.dictelem[l]['gwdt'][k]**2)
setattr(gmodstat, 'sbrtextsbgrd', sbrtextsbgrd)
sbrt['extsbgrd'] = []
sbrt['extsbgrd'] = sbrtextsbgrd
if gdat.booldiagmode:
cntppntschec = retr_cntp(gdat, sbrt['extsbgrd'])
if np.amin(cntppntschec) < -0.1:
raise Exception('Point source spectral surface brightness is not positive-definite.')
stopchro(gdat, gdatmodi, 'elemsbrtextsbgrd')
if gdat.typeverb > 2:
print('Element related state variables after perturbations...')
if gmod.boolelemsbrtdfncanyy:
print('sbrtdfnc')
summgene(sbrtdfnc)
if gmod.boolelemdeflsubhanyy:
print('deflsubh')
summgene(deflsubh)
if gmod.boolelemsbrtextsbgrdanyy:
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
if gmod.boollens:
# lensed surface brightness
initchro(gdat, gdatmodi, 'sbrtlens')
if gdat.typeverb > 2:
print('Evaluating lensed surface brightness...')
if strgstat == 'this' or (gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy):
sbrt['bgrd'] = []
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
sbrt['bgrdgalx'] = []
if gdat.numbener > 1:
specsour = retr_spec(gdat, np.array([fluxsour]), sind=np.array([sindsour]))
if gdat.typeverb > 2:
print('sindsour')
print(sindsour)
else:
specsour = np.array([fluxsour])
if gdat.typeverb > 2:
print('lgalsour')
print(lgalsour)
print('bgalsour')
print(bgalsour)
print('sizesour')
print(sizesour)
print('ellpsour')
print(ellpsour)
print('anglsour')
print(anglsour)
print('fluxsour')
print(fluxsour)
print('specsour')
print(specsour)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.typeverb > 2:
print('Interpolating the background emission...')
sbrt['bgrdgalx'] = retr_sbrtsers(gdat, gdat.lgalgrid[listindxpixlelem[0]], gdat.bgalgrid[listindxpixlelem[0]], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
if gdat.typeverb > 2:
print('sbrt[bgrdgalx]')
summgene(sbrt['bgrdgalx'])
print('sbrtextsbgrd')
summgene(sbrtextsbgrd)
sbrt['bgrd'] = sbrt['bgrdgalx'] + sbrtextsbgrd
sbrt['lens'] = np.empty_like(gdat.cntpdata)
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
sbrtbgrdobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
sbrt['bgrd'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
bgalprim = gdat.bgalgrid[listindxpixlelem[0]] - defl[listindxpixlelem[0], 1]
lgalprim = gdat.lgalgrid[listindxpixlelem[0]] - defl[listindxpixlelem[0], 0]
# temp -- T?
sbrt['lens'][ii, :, m] = sbrtbgrdobjt(bgalprim, lgalprim, grid=False).flatten()
else:
if gdat.typeverb > 2:
print('Not interpolating the background emission...')
sbrt['lens'] = retr_sbrtsers(gdat, gdat.lgalgrid - defl[gdat.indxpixl, 0], \
gdat.bgalgrid - defl[gdat.indxpixl, 1], \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
sbrt['bgrd'] = retr_sbrtsers(gdat, gdat.lgalgrid, \
gdat.bgalgrid, \
lgalsour, bgalsour, specsour, sizesour, ellpsour, anglsour)
setattr(gmodstat, 'sbrtlens', sbrt['lens'])
if gdat.booldiagmode:
if not np.isfinite(sbrt['lens']).all():
raise Exception('Lensed emission is not finite.')
if (sbrt['lens'] == 0).all():
raise Exception('Lensed emission is zero everywhere.')
stopchro(gdat, gdatmodi, 'sbrtlens')
### background surface brightness
sbrtback = []
# temp
#sbrtback = np.empty((numbback, gdat.numbener, indxpixlelem[yy].size, gdat.numbevtt))
# evaluate host galaxy surface brightness
if gmod.typeemishost != 'none':
initchro(gdat, gdatmodi, 'sbrthost')
for e in gmod.indxsersfgrd:
if gdat.typeverb > 2:
print('Evaluating the host galaxy surface brightness...')
if gdat.numbener > 1:
spechost = retr_spec(gdat, np.array([fluxhost[e]]), sind=np.array([sindhost[e]]))
else:
spechost = np.array([fluxhost[e]])
if gdat.typeverb > 2:
print('lgalhost[e]')
print(lgalhost[e] * gdat.anglfact)
print('bgalhost[e]')
print(bgalhost[e] * gdat.anglfact)
print('spechost')
print(spechost)
print('sizehost[e]')
print(sizehost[e])
print('ellphost[e]')
print(ellphost[e])
print('anglhost[e]')
print(anglhost[e])
print('serihost[e]')
print(serihost[e])
sbrt['hostisf%d' % e] = retr_sbrtsers(gdat, gdat.lgalgrid, gdat.bgalgrid, lgalhost[e], \
bgalhost[e], spechost, sizehost[e], ellphost[e], anglhost[e], serihost[e])
setattr(gmodstat, 'sbrthostisf%d' % e, sbrt['hostisf%d' % e])
#sbrthost = sbrt['host']
if gdat.typeverb > 2:
for e in gmod.indxsersfgrd:
print('e')
print(e)
print('sbrt[hostisf%d]' % e)
summgene(sbrt['hostisf%d' % e])
stopchro(gdat, gdatmodi, 'sbrthost')
## model emission
initchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('Summing up the model emission...')
sbrt['modlraww'] = np.zeros((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt))
for name in gmod.listnamediff:
if name.startswith('back'):
gmod.indxbacktemp = int(name[4:8])
if gdat.typepixl == 'heal' and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv') and not gmod.boolunifback[gmod.indxbacktemp]:
sbrttemp = getattr(gmod, 'sbrtbackhealfull')[gmod.indxbacktemp]
else:
sbrttemp = gmod.sbrtbacknorm[gmod.indxbacktemp]
if gmod.boolspecback[gmod.indxbacktemp]:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp]]
else:
sbrt[name] = sbrttemp * bacp[gmod.indxbacpback[gmod.indxbacktemp][gdat.indxener]][:, None, None]
sbrt['modlraww'] += sbrt[name]
if gdat.booldiagmode:
if np.amax(sbrttemp) == 0.:
raise Exception('Background template is identically zero.')
if gdat.typeverb > 2:
print('name')
print(name)
print('sbrt[name]')
summgene(sbrt[name])
if gdat.typeverb > 2:
for ii, i in enumerate(gdat.indxener):
print('ii, i')
print(ii, i)
for mm, m in enumerate(gdat.indxevtt):
print('mm, m')
print(mm, m)
print('sbrt[modlraww][ii, :, mm]')
summgene(sbrt['modlraww'][ii, :, mm])
# convolve the model with the PSF
if gmod.convdiffanyy and (gmod.typeevalpsfn == 'full' or gmod.typeevalpsfn == 'conv'):
sbrt['modlconv'] = []
# temp -- isotropic background proposals are unnecessarily entering this clause
if gdat.typeverb > 2:
print('Convolving the model image with the PSF...')
sbrt['modlconv'] = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for ii, i in enumerate(gdat.indxener):
for mm, m in enumerate(gdat.indxevtt):
if gdat.strgcnfg == 'pcat_ferm_igal_mock_test':
print('Convolving ii, i, mm, m')
print(ii, i, mm, m)
if gdat.typepixl == 'cart':
if gdat.numbpixl == gdat.numbpixlcart:
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrt['modlraww'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)), \
psfnconv[mm][ii]).flatten()
else:
sbrtfull = np.zeros(gdat.numbpixlcart)
sbrtfull[gdat.indxpixlrofi] = sbrt['modlraww'][ii, :, mm]
sbrtfull = sbrtfull.reshape((gdat.numbsidecart, gdat.numbsidecart))
sbrt['modlconv'][ii, :, mm] = convolve_fft(sbrtfull, psfnconv[mm][ii]).flatten()[gdat.indxpixlrofi]
indx = np.where(sbrt['modlconv'][ii, :, mm] < 1e-50)
sbrt['modlconv'][ii, indx, mm] = 1e-50
if gdat.typepixl == 'heal':
sbrt['modlconv'][ii, :, mm] = hp.smoothing(sbrt['modlraww'][ii, :, mm], fwhm=fwhm[i, m])[gdat.indxpixlrofi]
sbrt['modlconv'][ii, :, mm][np.where(sbrt['modlraww'][ii, :, mm] <= 1e-50)] = 1e-50
setattr(gmodstat, 'sbrtmodlconv', sbrt['modlconv'])
# temp -- this could be made faster -- need the copy() statement because sbrtdfnc gets added to sbrtmodl afterwards
sbrt['modl'] = np.copy(sbrt['modlconv'])
else:
if gdat.typeverb > 2:
print('Skipping PSF convolution of the model...')
sbrt['modl'] = np.copy(sbrt['modlraww'])
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
## add PSF-convolved delta functions to the model
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gdat.typeverb > 2:
print('Adding delta functions into the model...')
print('sbrt[dfnc]')
summgene(sbrt['dfnc'])
sbrt['modl'] += sbrt['dfnc']
stopchro(gdat, gdatmodi, 'sbrtmodl')
if gdat.typeverb > 2:
print('sbrt[modl]')
summgene(sbrt['modl'])
### count map
initchro(gdat, gdatmodi, 'expo')
cntp = dict()
cntp['modl'] = retr_cntp(gdat, sbrt['modl'])
if gdat.booldiagmode:
setattr(gmodstat, 'cntpmodl', cntp['modl'])
stopchro(gdat, gdatmodi, 'expo')
# mock data specific
if strgmodl == 'true' and strgstat == 'this':
# generate count data
cntptemp = np.zeros((gdat.numbener, gdat.numbpixl, gdat.numbevtt))
for i in gdat.indxener:
for j in gdat.indxpixl:
for m in gdat.indxevtt:
cntptemp[i, j, m] = np.random.poisson(cntp['modl'][i, j, m])
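# each energy-pixel-PSF bin is an independent Poisson draw with the model count expectation as its mean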
setattr(gdat, 'cntpdata', cntptemp)
if not gdat.boolsqzeexpo and np.amax(cntptemp) == 0:
print('cntp[modl]')
summgene(cntp['modl'])
print('gdat.boolsqzeexpo')
print(gdat.boolsqzeexpo)
print('cntptemp')
summgene(cntptemp)
raise Exception('Data is zero.')
proc_cntpdata(gdat)
## diagnostics
if gdat.booldiagmode:
frac = cntp['modl'] / np.mean(cntp['modl'])
if np.amin(frac) < -1e-3 and np.amin(cntp['modl']) < -0.1:
raise Exception('Model count map is significantly negative.')
indxcubebadd = np.where(cntp['modl'] < 0.)[0]
if indxcubebadd.size > 0:
print('Warning! Model prediction is negative. Correcting to 1e-20...')
cntp['modl'][indxcubebadd] = 1e-20
stopchro(gdat, gdatmodi, 'modl')
# log-prior
initchro(gdat, gdatmodi, 'lpri')
if gdat.typeverb > 2:
print('Evaluating the prior...')
lpri = np.zeros(gmod.numblpri)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
lpri[0] -= 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[l] * gmodstat.numbelem[l]
if gdat.penalpridiff:
sbrtdatapnts = gdat.sbrtdata - sbrt['dfnc']
if gdat.typepixl == 'heal':
raise Exception('Power spectrum prior penalty is not implemented for HealPix pixelization.')
if gdat.typepixl == 'cart':
psecodimdatapnts = np.empty((gdat.numbener, gdat.numbsidecarthalf, gdat.numbevtt))
psfn = retr_psfn(gdat, psfp, gdat.indxener, gdat.binspara.angl, gmod.typemodlpsfn, strgmodl)
fwhm = 2. * retr_psfnwdth(gdat, psfn, 0.5)
sigm = fwhm / 2.355
psecodimdatapntsprio = np.exp(-2. * gdat.meanpara.mpolodim[None, :, None] / (0.1 / sigm[:, None, :]))
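# prior template for the one-dimensional power spectrum of the point-source-subtracted
# emission: exponential suppression with multipole, with a scale set by the PSF width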
lpridiff = 0.
for i in gdat.indxener:
for m in gdat.indxevtt:
psecdatapnts = retr_psec(gdat, sbrtdatapnts[i, :, m])
psecodimdatapnts[i, :, m] = retr_psecodim(gdat, psecdatapnts)
psecodimdatapnts[i, :, m] /= psecodimdatapnts[i, 0, m]
lpridiff += -0.5 * np.sum((psecodimdatapnts[i, :, m] - psecodimdatapntsprio[i, :, m])**2)
setattr(gmodstat, 'psecodimdatapntsen%02devt%d' % (i, m), psecodimdatapnts[i, :, m])
setattr(gmodstat, 'psecodimdatapntsprioen%02devt%d'% (i, m), psecodimdatapntsprio[i, :, m])
lpri[1] = lpridiff
setattr(gmodstat, 'lpridiff', lpridiff)
if gmod.typemodltran == 'pois':
meanelem = gmodstat.paragenrscalfull[gmod.indxpara.meanelem]
for l in gmod.indxpopl:
lpri[2] += retr_lprbpois(gmodstat.numbelem[l], meanelem[l])
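# log-probability of the number of elements under a Poisson distribution with mean meanelem[l]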
for l in gmod.indxpopl:
for g, (strgfeat, strgpdfn) in enumerate(zip(gmod.namepara.genrelem[l], gmod.listscalparagenrelem[l])):
indxlpritemp = 3 + l * gmod.numbparagenrelem + g
lpri[indxlpritemp] = retr_lprielem(gdat, strgmodl, l, g, strgfeat, strgpdfn, gmodstat.paragenrscalfull, gmodstat.dictelem, gmodstat.numbelem)
lpritotl = np.sum(lpri)
if gdat.typeverb > 1:
print('lpritotl')
print(lpritotl)
### log-likelihood
initchro(gdat, gdatmodi, 'llik')
llik = retr_llik(gdat, strgmodl, cntp['modl'])
if gdat.typeverb > 2:
print('cntp[modl]')
summgene(cntp['modl'])
print('np.sum(cntp[modl], (1, 2))')
print(np.sum(cntp['modl'], (1, 2)))
print('np.sum(gdat.cntpdata, (1, 2))')
print(np.sum(gdat.cntpdata, (1, 2)))
if gdat.booldiagmode:
if not np.isfinite(llik).all():
raise Exception('Likelihood is not finite.')
gmodstat.lliktotl = np.sum(llik)
if gdat.booldiagmode:
if isinstance(gmodstat.lliktotl, np.ndarray):
raise Exception('Total log-likelihood is not a scalar.')
if not np.isfinite(gmodstat.lliktotl).all():
raise Exception('Total log-likelihood is not finite.')
numbdoff = gdat.numbdata - gmod.numbparagenrbase
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
numbdoff -= len(gmodstat.indxparagenrfullelem[l]['full'])
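# degrees of freedom: number of data points minus the base parameters and all element parameters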
setattr(gmodstat, 'llik', llik)
setattr(gmodstat, 'llikmean', gmodstat.lliktotl / gdat.numbdata)
setattr(gmodstat, 'llikcmea', gmodstat.lliktotl / (gdat.numbdata - numbdoff))
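# llikmean is the log-likelihood per data point; llikcmea normalizes by numbdata - numbdoff instead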
if gdat.typeverb > 2:
print('llik')
summgene(llik)
if gdat.typeverb > 1:
print('gmodstat.lliktotl')
print(gmodstat.lliktotl)
stopchro(gdat, gdatmodi, 'llik')
lpostotl = lpritotl + gmodstat.lliktotl
if gdat.typeverb > 1:
print('lpostotl')
print(lpostotl)
setattr(gmodstat, 'lpritotl', lpritotl)
setattr(gmodstat, 'lliktotl', gmodstat.lliktotl)
setattr(gmodstat, 'lpostotl', lpostotl)
stopchro(gdat, gdatmodi, 'lpri')
if strgstat == 'next':
return
initchro(gdat, gdatmodi, 'tert')
setattr(gmodstat, 'lpri', lpri)
if gmod.numbparaelem > 0:
setattr(gmodstat, 'lpripena', lpri[0])
dicttert = {}
## load necessary variables
## derived variables
## residual count map
cntp['resi'] = []
cntp['resi'] = gdat.cntpdata - cntp['modl']
setattr(gmodstat, 'cntpmodl', cntp['modl'])
setattr(gmodstat, 'cntpresi', cntp['resi'])
setattr(gmodstat, 'llik', llik)
#if gmod.boollens:
# setattr(gmodstat, 'deflhost', deflhost)
if gmod.boollens:
setattr(gmodstat, 'defl', defl)
for e in gmod.indxsersfgrd:
masshostbein = massfrombein * beinhost[e]**2
setattr(gmodstat, 'masshostisf%dbein' % e, masshostbein)
### sort with respect to deflection at scale radius
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodstat.numbelem[l] > 0:
indxelemsortampl = np.argsort(gmodstat.dictelem[l][gmod.nameparaelemsort[l]])[::-1]
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmodstat.dictelem[l][nameparagenrelem + 'sort'] = gmodstat.dictelem[l][nameparagenrelem][indxelemsortampl]
deflsing = np.zeros((gdat.numbpixlcart, 2, gdat.numbdeflsingplot))
conv = np.zeros((gdat.numbpixlcart))
convpsec = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecodim = np.zeros((gdat.numbsidecarthalf))
# whether any element population is of the lensing type (assumed definition; the name is used throughout this block)
boolelemlens = 'lens' in gmod.typeelem
if gmod.numbparaelem > 0:
if boolelemlens:
gmod.indxpopllens = gmod.typeelem.index('lens')
numbdeflsing = 2
if gmod.numbparaelem > 0:
if boolelemlens:
if gmodstat.numbelem[gmod.indxpopllens] > 0:
numbdeflsing += min(gdat.numbdeflsubhplot, gmodstat.numbelem[gmod.indxpopllens])
numbdeflsing += 1
for k in range(numbdeflsing):
indxpixltemp = gdat.indxpixlcart
if k == 0:
# temp -- should take other sersics into account
deflsing[indxpixltemp, :, k] = deflhost[0]
elif k == 1:
deflsing[indxpixltemp, :, k] = deflextr
elif k == 2:
deflsing[indxpixltemp, :, k] = defl - deflextr - deflhost[0]
else:
asca = gmodstat.dictelem[gmod.indxpopllens]['ascasort'][None, k-3]
acut = gmodstat.dictelem[gmod.indxpopllens]['acutsort'][None, k-3]
deflsing[listindxpixlelem[gmod.indxpopllens][k], :, k] = retr_defl(gdat, listindxpixlelem[gmod.indxpopllens][k], \
gmodstat.dictelem[gmod.indxpopllens]['lgalsort'][None, k-3], gmodstat.dictelem[gmod.indxpopllens]['bgalsort'][None, k-3], \
gmodstat.dictelem[gmod.indxpopllens]['defssort'][None, k-3], asca=asca, acut=acut)
# convergence
## total
conv[:] = retr_conv(gdat, defl)
convhost = np.zeros((gmod.numbsersfgrd, gdat.numbpixlcart))
for e in gmod.indxsersfgrd:
convhost[e, :] = retr_conv(gdat, deflhost[e])
### power spectrum
#### two dimensional
convpsec[:] = retr_psec(gdat, conv[:])
#### one dimensional
convpsecodim[:] = retr_psecodim(gdat, convpsec[:])
setattr(gmodstat, 'convpsec', convpsec)
setattr(gmodstat, 'convpsecodim', convpsecodim)
setattr(gmodstat, 'conv', conv[...])
for e in gmod.indxsersfgrd:
setattr(gmodstat, 'convisf%d' % e, convhost[e, ...])
## subhalos
if gmod.numbparaelem > 0:
if boolelemlens:
convelem = np.zeros((gdat.numbpixl))
convpsecelem = np.zeros(((gdat.numbsidecarthalf)**2))
convpsecelemodim = np.zeros((gdat.numbsidecarthalf))
### convergence
convelem[:] = retr_conv(gdat, deflsubh)
### power spectrum
##### two dimensional
convpsecelem[:] = retr_psec(gdat, convelem[:])
##### one dimensional
convpsecelemodim[:] = retr_psecodim(gdat, convpsecelem[:])
setattr(gmodstat, 'convpsecelem', convpsecelem)
setattr(gmodstat, 'convpsecelemodim', convpsecelemodim)
setattr(gmodstat, 'convelem', convelem[...])
setattr(gmodstat, 'defl', defl)
### magnification
magn = np.empty((gdat.numbpixlcart))
histdefl = np.empty((gdat.numbdefl))
if gmod.numbparaelem > 0 and boolelemlens:
histdeflsubh = np.empty((gdat.numbdefl))
deflsingmgtd = np.zeros((gdat.numbpixlcart, gdat.numbdeflsingplot))
magn[:] = 1. / retr_invm(gdat, defl)
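# magnification as the reciprocal of the inverse magnification (the Jacobian determinant presumably returned by retr_invm)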
histdefl[:] = np.histogram(defl, bins=gdat.binspara.defl)[0]
if gmod.numbparaelem > 0:
if boolelemlens:
histdeflsubh[:] = np.histogram(deflsubh, bins=gdat.binspara.deflsubh)[0]
deflsingmgtd[:, :] = np.sqrt(np.sum(deflsing[...]**2, axis=1))
if gmod.numbparaelem > 0:
if boolelemlens:
setattr(gmodstat, 'histdeflsubh', histdeflsubh)
setattr(gmodstat, 'histdefl', histdefl)
setattr(gmodstat, 'magn', magn[...])
setattr(gmodstat, 'deflsing', deflsing[...])
setattr(gmodstat, 'deflsingmgtd', deflsingmgtd[...])
## element related
if gmod.numbparaelem > 0:
if gdat.numbpixl == 1:
for l in gmod.indxpopl:
for k in range(gmodstat.numbelem[l]):
setattr(gmodstat, 'speclinepop%d%04d' % (l, k), gmodstat.dictelem[l]['spec'][:, k])
if gdat.typedata == 'mock' and strgmodl == 'true' and gdat.numbpixl > 1:
gdat.refrlgal = [[] for l in gmod.indxpopl]
gdat.refrbgal = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gdat.refrlgal[l] = np.tile(gmodstat.dictelem[l]['lgal'], [3] + list(np.ones(gmodstat.dictelem[l]['lgal'].ndim, dtype=int)))
gdat.refrbgal[l] = np.tile(gmodstat.dictelem[l]['bgal'], [3] + list(np.ones(gmodstat.dictelem[l]['bgal'].ndim, dtype=int)))
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['per1'] = retr_per1(gmodstat.dictelem[l]['per0'], gmodstat.dictelem[l]['magf'])
if gmod.numbparaelem > 0:
if strgstat == 'this' or (gdat.boolrefeforc and strgmodl == 'fitt'):
# correlate the fitting model elements with the reference elements
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
indxelemrefrasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasschits = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] == 0:
continue
indxelemfittmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
indxelemrefrmatr = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]), dtype=int)
matrdist = np.empty((gdat.refr.numbelem[q], gmodstat.numbelem[l]))
for k in range(gmodstat.numbelem[l]):
# construct a matrix of angular distances between reference and fitting elements
if gmod.typeelem[l].startswith('lghtline'):
matrdist[:, k] = abs(gdat.refrelin[q][0, :] - gmodstat.dictelem[l]['elin'][k]) / gdat.refrelin[q][0, :]
else:
matrdist[:, k] = retr_angldist(gdat, gdat.refr.dictelem[q]['lgal'][0, :], gdat.refr.dictelem[q]['bgal'][0, :], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k])
indxelemrefrmatr[:, k] = np.arange(gdat.refr.numbelem[q])
indxelemfittmatr[:, k] = k
matrdist = matrdist.flatten()
indxelemrefrmatr = indxelemrefrmatr.flatten()
indxelemfittmatr = indxelemfittmatr.flatten()
# take only angular separations smaller than some threshold
indxmatrthrs = np.where(matrdist < gdat.anglassc)
matrdist = matrdist[indxmatrthrs]
indxelemrefrmatr = indxelemrefrmatr[indxmatrthrs]
indxelemfittmatr = indxelemfittmatr[indxmatrthrs]
# sort the remaining associations with respect to distance
indxmatrsort = np.argsort(matrdist)
matrdist = matrdist[indxmatrsort]
indxelemrefrmatr = indxelemrefrmatr[indxmatrsort]
indxelemfittmatr = indxelemfittmatr[indxmatrsort]
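# greedy one-to-one matching: walk candidate pairs in order of increasing distance,
# skipping pairs whose reference or fitting element has already been claimed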
for c in range(matrdist.size):
if indxelemrefrmatr[c] in indxelemrefrasschits[q][l] or indxelemfittmatr[c] in indxelemfittasschits[q][l]:
continue
indxelemrefrasschits[q][l].append(indxelemrefrmatr[c])
indxelemfittasschits[q][l].append(indxelemfittmatr[c])
indxelemrefrasschits[q][l] = np.array(indxelemrefrasschits[q][l])
indxelemfittasschits[q][l] = np.array(indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasschits', indxelemrefrasschits)
setattr(gmodstat, 'indxelemfittasschits', indxelemfittasschits)
indxelemrefrasscmiss = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
indxelemfittasscfals = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
# indices of the reference elements not associated with the fitting model elements
if gdat.refr.numbelem[q] > 0:
indxelemrefrasscmiss[q][l] = np.setdiff1d(np.arange(gdat.refr.numbelem[q]), indxelemrefrasschits[q][l])
# indices of the fitting model elements not associated with the reference elements
if gmodstat.numbelem[l] > 0:
indxelemfittasscfals[q][l] = np.setdiff1d(np.arange(gmodstat.numbelem[l]), indxelemfittasschits[q][l])
setattr(gmodstat, 'indxelemrefrasscmiss', indxelemrefrasscmiss)
setattr(gmodstat, 'indxelemfittasscfals', indxelemfittasscfals)
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for l in gmod.indxpopl:
# collect the associated reference element parameter for each fitting element
for strgfeat in gdat.refr.namepara.elemonly[q][l]:
name = strgfeat + gdat.listnamerefr[q]
if strgfeat != 'spec' and strgfeat != 'specplot':
refrfeat = getattr(gdat.refr, strgfeat)
gmodstat.dictelem[l][name] = np.zeros(gmodstat.numbelem[l])
if len(refrfeat[q]) > 0 and len(indxelemrefrasschits[q][l]) > 0:
gmodstat.dictelem[l][name][indxelemfittasschits[q][l]] = refrfeat[q][0, indxelemrefrasschits[q][l]]
print('temp')
continue
# collect the error in the associated reference element amplitude
for strgfeat in gdat.listnameparaetotlelemcomm[q][l]:
refrfeat = getattr(gdat.refr, strgfeat)
if strgfeat == gmod.nameparagenrelemampl[l] and len(indxelemfittasschits[q][l]) > 0:
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]] = np.zeros(gmodstat.numbelem[l])
fittfeattemp = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
refrfeattemp = refrfeat[q][0, indxelemrefrasschits[q][l]]
if gdat.booldiagmode:
if not np.isfinite(refrfeattemp).all():
raise Exception('Associated reference element feature is not finite.')
gmodstat.dictelem[l]['aerr' + gdat.listnamerefr[q]][indxelemfittasschits[q][l]] = 100. * (fittfeattemp - refrfeattemp) / refrfeattemp
if gdat.boolrefeforc and strgmodl == 'fitt':
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat in gdat.refr.namepara.elem[gdat.indxrefrforc[l]]:
if len(indxelemrefrasschits[gdat.indxrefrforc[l]][l]) == 0:
continue
refrfeat = getattr(gdat.refr, strgfeat)[gdat.indxrefrforc[l]][0, indxelemrefrasschits[gdat.indxrefrforc[l]][l]]
if len(gmodstat.dictelem[l][strgfeat]) == 0:
continue
lpritotl += -2. * np.sum(1e6 * (gmodstat.dictelem[l][strgfeat][indxelemfittasschits[gdat.indxrefrforc[l]][l]] - refrfeat)**2 / refrfeat**2)
# other tertiary variables continue below
## reduced chi-squared
chi2doff = np.sum(cntp['resi']**2 / gdat.varidata) / numbdoff
if gdat.booldiagmode:
if not np.isfinite(cntp['resi']).all():
raise Exception('Residual count map is not finite.')
if not np.isfinite(numbdoff):
raise Exception('Number of degrees of freedom is not finite.')
if not np.isfinite(chi2doff):
raise Exception('Reduced chi-squared is not finite.')
setattr(gmodstat, 'numbdoff', numbdoff)
setattr(gmodstat, 'chi2doff', chi2doff)
if gmod.boolelempsfn and gmod.numbparaelem > 0:
gmodstat.fwhmpsfn = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
if gmod.numbparaelem > 0:
### derived parameters
for l in gmod.indxpopl:
# luminosity
if gmod.boolelemlght[l] and 'flux' in gmod.namepara.genrelem[l]:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat.startswith('reds') and strgfeat != 'reds':
namerefr = strgfeat[-4:]
gmodstat.dictelem[l]['lumi' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
gmodstat.dictelem[l]['dlos' + namerefr] = np.zeros(gmodstat.numbelem[l]) + np.nan
reds = gmodstat.dictelem[l]['reds' + namerefr]
indxgood = np.where(np.isfinite(gmodstat.dictelem[l]['reds' + namerefr]))[0]
if indxgood.size > 0:
# temp -- these units only work for energy units of keV
dlos = gdat.adisobjt(reds[indxgood])
gmodstat.dictelem[l]['dlos' + namerefr][indxgood] = dlos
lumi = retr_lumi(gdat, gmodstat.dictelem[l]['flux'][indxgood], dlos, reds[indxgood])
gmodstat.dictelem[l]['lumi' + namerefr][indxgood] = lumi
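# retr_lumi presumably converts flux to luminosity via the luminosity distance, L = 4 pi d_L^2 f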
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmodstat.dictelem[l]['reds'] = gdat.redsfromdlosobjt(gmodstat.dictelem[l]['dlos'])
if gmod.typeelem[l] == 'lghtpntspuls':
gmodstat.dictelem[l]['mass'] = np.full(gmodstat.numbelem[l], 3.)
if gdat.typeverb > 2:
print('l')
print(l)
if gdat.boolbinsspat:
#### radial and angular coordinates
gmodstat.dictelem[l]['gang'] = retr_gang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
gmodstat.dictelem[l]['aang'] = retr_aang(gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'])
if gmod.boolelemlght[l]:
#### number of expected counts
if gdat.boolbinsspat:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal']], gmodstat.dictelem[l]['spec'])
else:
gmodstat.dictelem[l]['cnts'] = retr_cntspnts(gdat, [gmodstat.dictelem[l]['elin']], gmodstat.dictelem[l]['spec'])
#### delta log-likelihood
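# deltllik is the drop in the total log-likelihood when an element is removed, evaluated
# below by applying a death proposal (deth=True) to a temporary copy of the state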
gmodstat.dictelem[l]['deltllik'] = np.zeros(gmodstat.numbelem[l])
if not (strgmodl == 'true' and gdat.checprio):
if gdat.typeverb > 2:
print('Calculating log-likelihood differences when removing elements from the model.')
for k in range(gmodstat.numbelem[l]):
# construct gdatmodi
gdatmoditemp = tdpy.gdatstrt()
gdatmoditemp.this = tdpy.gdatstrt()
gdatmoditemp.next = tdpy.gdatstrt()
gdatmoditemp.this.indxelemfull = gmodstat.indxelemfull
gdatmoditemp.this.paragenrscalfull = gmodstat.paragenrscalfull
gdatmoditemp.this.paragenrunitfull = gmodstat.paragenrunitfull
prop_stat(gdat, gdatmoditemp, strgmodl, deth=True, thisindxpopl=l, thisindxelem=k)
proc_samp(gdat, gdatmoditemp, 'next', strgmodl)#, boolinit=boolinit)
if gdat.booldiagmode:
if not np.isfinite(gmodstat.lliktotl):
raise Exception('Total log-likelihood is not finite.')
gdatobjttemp = retr_gdatobjt(gdat, gdatmoditemp, strgmodl)#, boolinit=boolinit)
nextlliktotl = gdatobjttemp.next.lliktotl
gmodstat.dictelem[l]['deltllik'][k] = gmodstat.lliktotl - nextlliktotl
if gdat.typeverb > 2:
print('deltllik calculation ended.')
# more derived parameters
if (gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full') and (strgmodl == 'true' or boolinit or gdat.boolmodipsfn):
### PSF FWHM
if gdat.typepixl == 'cart':
fwhm = 2. * retr_psfnwdth(gdat, gmodstat.psfn, 0.5)
setattr(gmodstat, 'fwhm', fwhm)
if gmod.numbparaelem > 0 and gmod.boolelemsbrtdfncanyy:
if gmod.numbparaelem > 0:
sbrt['dfnctotl'] = np.zeros_like(gdat.expo)
sbrt['dfncsubt'] = np.zeros_like(gdat.expo)
sbrt['dfncsupt'] = np.zeros_like(gdat.expo)
for l in gmod.indxpopl:
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'] = np.zeros_like(gdat.expo)
if gmod.boolelemsbrt[l]:
for k in range(gmodstat.numbelem[l]):
# read normalization from the element dictionary
if gmod.boolelemlght[l]:
varbamplextd = gmodstat.dictelem[l]['spec'][:, k]
if gmod.typeelem[l].startswith('clus'):
varbamplextd = gmodstat.dictelem[l]['nobj'][None, k]
# calculate imprint on the element surface brightness state variable
if gmod.boolelempsfn[l]:
sbrttemp = retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, listindxpixlelem[l][k])
indxpixltemp = listindxpixlelem[l][k]
if gmod.typeelem[l].startswith('lghtline'):
sbrttemp = gmodstat.dictelem[l]['spec'][:, k, None, None]
# add it to the state variable depending on the significance
sbrt['dfnctotl'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] > 35:
sbrt['dfncsupt'][:, indxpixltemp, :] += sbrttemp
if gmodstat.dictelem[l]['deltllik'][k] <= 35:
sbrt['dfncsubt'][:, indxpixltemp, :] += sbrttemp
# calculate imprint without PSF truncation to calculate approximation errors
if gmod.boolcalcerrr[l]:
sbrt['dfncfull'][:, :, :] += retr_sbrtpnts(gdat, gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
varbamplextd, gmodstat.psfnintp, gdat.indxpixl)
setattr(gmodstat, 'sbrtdfncsubtpop%d' % l, sbrt['dfncsubt'])
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
if gdat.booldiagmode:
numbtemp = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrtextsbgrd[l]:
numbtemp += np.sum(gmodstat.numbelem[l])
if numbtemp > 0 and (sbrtextsbgrd == 0.).all():
raise Exception('Element background surface brightness is identically zero despite nonzero elements.')
sbrt['bgrdexts'] = sbrtextsbgrd
#### count maps
cntp = dict()
for name in gmod.listnamegcom:
cntp[name] = retr_cntp(gdat, sbrt[name])
setattr(gmodstat, 'cntp' + name, cntp[name])
### spatial averages
sbrtmean = dict()
sbrtstdv = dict()
for name in gmod.listnamegcom:
sbrtmean[name], sbrtstdv[name] = retr_spatmean(gdat, sbrt[name])
for b in gdat.indxspatmean:
setattr(gmodstat, 'sbrt%smea%d' % (name, b), sbrtmean[name][b])
setattr(gmodstat, 'sbrt%sstd%d' % (name, b), sbrtstdv[name][b])
if gmod.numbparaelem > 0:
if gmod.boolelemsbrtdfncanyy:
for i in gdat.indxener:
if 'dark' in gmod.listnamegcom:
fracsdenmeandarkdfncsubt = sbrtmean['dfncsubt'][0][0][i] / (sbrtmean['dfncsubt'][0][0][i] + sbrtmean['dark'][0][0][i])
else:
fracsdenmeandarkdfncsubt = 1.
setattr(gmodstat, 'fracsdenmeandarkdfncsubten%02d' % i, np.array([fracsdenmeandarkdfncsubt]))
if 'dark' in gmod.listnamegcom:
booldfncsubt = float(np.where(sbrtmean['dfncsubt'][0][0] > sbrtmean['dark'][0][0])[0].any())
else:
booldfncsubt = 1.
setattr(gmodstat, 'booldfncsubt', np.array([booldfncsubt]))
# find the 1-point function of the count maps of all emission components including the total emission
for name in gmod.listnamegcom:
namehistcntp = 'histcntp' + name
for m in gdat.indxevtt:
if gdat.numbevtt > 1:
namehistcntp += 'evt%d' % m
for i in gdat.indxener:
if gdat.numbener > 1:
namehistcntp += 'en%02d' % i
histcntp = np.histogram(cntp[name][i, :, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, namehistcntp, histcntp)
if False and i == 0 and m == 0 and (name == 'dfnc' or name == 'dfncsubt'):
for strgbins in ['lowr', 'higr']:
strgtemp = 'histcntp' + strgbins + name + 'en%02devt%d' % (i, m)
if strgbins == 'lowr':
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[:gdat.numbtickcbar-1]))]))
else:
setattr(gmod, strgtemp, np.array([float(np.sum(histcntp[gdat.numbtickcbar-1:]))]))
else:
histcntp = np.histogram(cntp[name][:, 0, m], bins=gdat.binspara.cntpmodl)[0]
setattr(gmodstat, 'histcntp' + name + 'evt%d' % m, histcntp)
if gmod.boollens:
if strgmodl == 'true':
s2nr = []
s2nr = cntp['lens'] / np.sqrt(cntp['modl'])
setattr(gmodstat, 's2nr', s2nr)
cntplensgrad = np.empty((gdat.numbener, gdat.numbpixlcart, gdat.numbevtt, 2))
for i in gdat.indxener:
for m in gdat.indxevtt:
cntplenstemp = np.zeros(gdat.numbpixlcart)
cntplenstemp[gdat.indxpixlrofi] = cntp['lens'][i, :, m]
cntplensgrad[i, :, m, :] = retr_gradmaps(gdat, cntplenstemp) * gdat.sizepixl
cntplensgradmgtd = np.sqrt(np.sum(cntplensgrad**2, axis=3))
cntplensgrad *= gdat.sizepixl
indx = np.where(np.fabs(cntplensgrad) > 1. * gdat.sizepixl)
cntplensgrad[indx] = np.sign(cntplensgrad[indx]) * 1. * gdat.sizepixl
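# clip gradient components at one pixel size, presumably to keep downstream visualizations legible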
deflmgtd = np.sqrt(np.sum(defl**2, axis=1))
setattr(gmodstat, 'deflmgtd', deflmgtd)
setattr(gmodstat, 'cntplensgrad', cntplensgrad)
setattr(gmodstat, 'cntplensgradmgtd', cntplensgradmgtd)
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
#### spectra
if gdat.boolbinsspat:
sindcolr = [gmodstat.dictelem[l]['sindcolr%04d' % i] for i in gdat.indxenerinde]
gmodstat.dictelem[l]['specplot'] = retr_spec(gdat, gmodstat.dictelem[l]['flux'], sind=gmodstat.dictelem[l]['sind'], \
curv=gmodstat.dictelem[l]['curv'], expc=gmodstat.dictelem[l]['expc'], \
sindcolr=sindcolr, spectype=gmod.spectype[l], plot=True)
if gdat.typedata == 'inpt':
if gdat.typeexpr == 'ferm':
# temp
try:
gmodstat.dictelem[l]['sbrt0018'] = gdat.sbrt0018objt(gmodstat.dictelem[l]['bgal'], gmodstat.dictelem[l]['lgal'])
except:
gmodstat.dictelem[l]['sbrt0018'] = gmodstat.dictelem[l]['bgal'] * 0.
if gmod.typeelem[l] == 'lens':
#### distance to the source
if gmod.boollens:
gmodstat.dictelem[l]['diss'] = retr_angldist(gdat, gmodstat.dictelem[l]['lgal'], gmodstat.dictelem[l]['bgal'], lgalsour, bgalsour)
if gmod.boollenssubh:
gmodstat.dictelem[l]['deflprof'] = np.empty((gdat.numbanglfull, gmodstat.numbelem[l]))
gmodstat.dictelem[l]['mcut'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['rele'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reln'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relk'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relf'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['reld'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relc'] = np.empty(gmodstat.numbelem[l])
gmodstat.dictelem[l]['relm'] = np.empty(gmodstat.numbelem[l])
# temp -- this can be placed earlier in the code
cntplensobjt = sp.interpolate.RectBivariateSpline(gdat.meanpara.bgalcart, gdat.meanpara.lgalcart, \
cntp['lens'][ii, :, mm].reshape((gdat.numbsidecart, gdat.numbsidecart)).T)
for k in np.arange(gmodstat.numbelem[l]):
asca = gmodstat.dictelem[l]['asca'][k]
acut = gmodstat.dictelem[l]['acut'][k]
#### deflection profiles
gmodstat.dictelem[l]['deflprof'][:, k] = retr_deflcutf(gdat.meanpara.anglfull, gmodstat.dictelem[l]['defs'][k], asca, acut)
### truncated mass
gmodstat.dictelem[l]['mcut'][k] = retr_mcut(gdat, gmodstat.dictelem[l]['defs'][k], asca, acut, adishost, mdencrit)
#### dot product with the source flux gradient
# temp -- weigh the energy and PSF bins
gmodstat.dictelem[l]['rele'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relf'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, cntpmodl=cntp['modl'][0, :, 0])
deflelem = retr_defl(gdat, gdat.indxpixl, gmodstat.dictelem[l]['lgal'][k], \
gmodstat.dictelem[l]['bgal'][k], gmodstat.dictelem[l]['defs'][k], asca=asca, acut=acut)
bgalprim = gdat.bgalgrid - deflelem[:, 1]
lgalprim = gdat.lgalgrid - deflelem[:, 0]
gmodstat.dictelem[l]['relm'][k] = np.mean(abs(cntp['lens'][0, :, 0] - cntplensobjt(bgalprim, lgalprim, grid=False).flatten()))
gmodstat.dictelem[l]['relk'][k] = gmodstat.dictelem[l]['relm'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reln'][k] = gmodstat.dictelem[l]['rele'][k] / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
gmodstat.dictelem[l]['reld'][k] = retr_rele(gdat, gdat.cntpdata[0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl)
gmodstat.dictelem[l]['relc'][k] = retr_rele(gdat, cntp['lens'][0, :, 0], gmodstat.dictelem[l]['lgal'][k], gmodstat.dictelem[l]['bgal'][k], \
gmodstat.dictelem[l]['defs'][k], asca, acut, gdat.indxpixl, absv=False) / gmodstat.dictelem[l]['defs'][k] * gdat.sizepixl
### distribution of element parameters and features
#### calculate the model filter
listindxelemfilt = [[[] for l in gmod.indxpopl] for namefilt in gdat.listnamefilt]
for k, namefilt in enumerate(gdat.listnamefilt):
for l in gmod.indxpopl:
if namefilt == '':
listindxelemfilt[k][l] = np.arange(gmodstat.numbelem[l])
if namefilt == 'imagbndr':
listindxelemfilt[k][l] = np.where((np.fabs(gmodstat.dictelem[l]['lgal']) < gdat.maxmgangdata) & (np.fabs(gmodstat.dictelem[l]['bgal']) < gdat.maxmgangdata))[0]
if namefilt == 'deltllik':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['deltllik'] > 0.5 * gmod.numbparagenrelemsing[l])[0]
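# keeps elements whose log-likelihood gain exceeds half their parameter count (an AIC-like criterion)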
if namefilt == 'nrel':
listindxelemfilt[k][l] = np.where(gmodstat.dictelem[l]['reln'] > 0.3)[0]
for l in gmod.indxpopl:
# histograms of element parameters
for namefrst in gmod.namepara.elem[l]:
## one dimensional
if namefrst[:-4] == 'etag':
continue
if namefrst == 'specplot' or namefrst == 'deflprof':
continue
elif namefrst == 'spec':
histfrst = np.zeros((gdat.numbbinsplot, gdat.numbener))
for i in gdat.indxener:
histfrst[:, i] = np.histogram(gmodstat.dictelem[l]['spec'][i, listindxelemfilt[0][l]], gdat.binspara.spec)[0]
elif namefrst == 'cnts':
histfrst = np.histogram(gmodstat.dictelem[l]['cnts'][listindxelemfilt[0][l]], gdat.binspara.cnts)[0]
else:
#elif not (namefrst == 'curv' and gmod.spectype[l] != 'curv' or namefrst == 'expc' \
# and gmod.spectype[l] != 'expc' or namefrst.startswith('sindarry') and \
# gmod.spectype[l] != 'colr'):
binsfrst = getattr(gdat.binspara, namefrst)
#if len(gmodstat.dictelem[l][namefrst]) > 0 and len(listindxelemfilt[0][l]) > 0:
histfrst = np.histogram(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], binsfrst)[0]
strgvarb = 'hist' + namefrst + 'pop%d' % l
setattr(gmodstat, strgvarb, histfrst)
#### two dimensional
for nameseco in gmod.namepara.elem[l]:
if namefrst == 'spec' or namefrst == 'specplot' or namefrst == 'deflprof' or \
nameseco == 'spec' or nameseco == 'specplot' or nameseco == 'deflprof':
continue
if not checstrgfeat(namefrst, nameseco):
continue
binsseco = getattr(gdat.binspara, nameseco)
histtdim = np.histogram2d(gmodstat.dictelem[l][namefrst][listindxelemfilt[0][l]], \
gmodstat.dictelem[l][nameseco][listindxelemfilt[0][l]], [binsfrst, binsseco])[0]
setattr(gmodstat, 'hist' + namefrst + nameseco + 'pop%d' % l, histtdim)
### priors on element parameters and features
for nameparagenrelem in gmod.namepara.genrelem[l]:
xdat = gmodstat.dictelem[l][nameparagenrelem]
minm = getattr(gmod.minmpara, nameparagenrelem + 'pop%d' % l)
maxm = getattr(gmod.maxmpara, nameparagenrelem + 'pop%d' % l)
scal = getattr(gmod.scalpara, nameparagenrelem + 'pop%d' % l)
booltemp = False
if scal.startswith('expo') or scal.startswith('dexp'):
if scal.startswith('expo'):
if scal == 'expo':
sexp = getattr(gmod, 'gangdistsexppop%d' % l)
else:
sexp = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
pdfn = pdfn_expo(xdat, maxm, sexp)
if scal.startswith('dexp'):
pdfn = pdfn_dexp(xdat, maxm, scal)
booltemp = True
if scal.startswith('self') or scal.startswith('logt'):
if scal.startswith('self'):
pdfn = 1. / (maxm - minm) + np.zeros_like(xdat)
else:
pdfn = 1. / (np.log(maxm) - np.log(minm)) + np.zeros_like(xdat)
booltemp = True
# temp
if scal.startswith('powr'):
slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem + 'pop%d' % l)]
pdfn = pdfn_powr(xdat, minm, maxm, slop)
booltemp = True
if scal.startswith('dpowslopbrek'):
# fetch the break and slopes of the broken power law (assumed parameter names, mirroring retr_lpridpowdist above)
brek = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distbrek')[l]]
sloplowr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = gmodstat.paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)
booltemp = True
if scal == 'lnormeanstdv':
pdfn = pdfn_lnor(xdat, meanlnor, stdvlnor)
booltemp = True
if scal.startswith('igam'):
# fetch the slope of the inverse-gamma prior (assumed parameter name, mirroring retr_lpriigamdist above)
slop = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'slop')[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
pdfn = pdfn_igam(xdat, slop, cutf)
booltemp = True
if scal.startswith('gaus'):
# temp -- this does not work for mismodeling
meanvarb = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
stdv = gmodstat.paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
pdfn = pdfn_gaus(xdat, meanvarb, stdv)
booltemp = True
# temp -- meanelem will not be defined
#if booltemp:
# gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'] = gmodstat.numbelem[l] * pdfn * np.interp(xdat, xdatplot, delt)
#setattr(gmodstat, 'hist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
#if strgmodl == 'true':
# setattr(gmodstat, 'refrhist' + nameparagenrelem + 'pop%dprio' % l, gmodstat.dictelem[l]['hist' + nameparagenrelem + 'prio'])
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
if gmodstat.numbelem[l] > 0:
## total truncated mass of the subhalo as a cross check
# temp -- generalize
asca = gmodstat.dictelem[l]['asca']
acut = gmodstat.dictelem[l]['acut']
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
masssubh = np.array([np.sum(factmcutfromdefs * gmodstat.dictelem[l]['defs'])])
## derived variables as a function of other derived variables
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpntspuls'):
massshel = np.empty(gdat.numbanglhalf)
for k in gdat.indxanglhalf:
indxelemshel = np.where((gdat.binspara.anglhalf[k] < gmodstat.dictelem[l]['gang']) & (gmodstat.dictelem[l]['gang'] < gdat.binspara.anglhalf[k+1]))
massshel[k] = np.sum(gmodstat.dictelem[l]['mass'][indxelemshel])
setattr(gmodstat, 'massshelpop%d' % l, massshel)
if gmod.boollens or gmod.numbparaelem > 0 and gmod.boollenssubh:
# find the host, subhalo masses and subhalo mass fraction as a function of halo-centric radius
listnametemp = gdat.liststrgcalcmasssubh
listnamevarbmass = []
listnamevarbmassscal = []
listnamevarbmassvect = []
for e in gmod.indxsersfgrd:
            if gmod.boollenshost:
listnamevarbmassscal += ['masshosttotl']
for strgtemp in listnametemp:
listnamevarbmassvect.append('masshostisf%d' % e + strgtemp)
listnamevarbmassscal.append('masshostisf%d' % e + strgtemp + 'bein')
if gmod.numbparaelem > 0 and gmod.boollenssubh:
listnamevarbmassscal.append('masssubhtotl')
listnamevarbmassscal.append('fracsubhtotl')
for strgtemp in listnametemp:
listnamevarbmassvect.append('masssubh' + strgtemp)
listnamevarbmassvect.append('fracsubh' + strgtemp)
listnamevarbmassscal.append('masssubh' + strgtemp + 'bein')
listnamevarbmassscal.append('fracsubh' + strgtemp + 'bein')
for name in listnamevarbmassvect:
dicttert[name] = np.zeros(gdat.numbanglhalf)
if 'isf' in name:
indxisfrtemp = int(name.split('isf')[1][0])
angl = np.sqrt((gdat.meanpara.lgalcartmesh - lgalhost[indxisfrtemp])**2 + (gdat.meanpara.bgalcartmesh - bgalhost[indxisfrtemp])**2).flatten()
for k in gdat.indxanglhalf:
if name[4:8] == 'host':
convtemp = conv[:]
if name[4:8] == 'subh':
convtemp = convelem[:]
if name.endswith('delt'):
indxpixl = np.where((gdat.binspara.anglhalf[k] < angl) & (angl < gdat.binspara.anglhalf[k+1]))[0]
dicttert[name][k] = 1e6 * np.sum(convtemp[indxpixl]) * mdencrit * \
gdat.apix * adishost**2 / 2. / np.pi * gdat.deltanglhalf[k] / gdat.meanpara.anglhalf[k]
if name.endswith('intg'):
indxpixl = np.where(angl < gdat.meanpara.anglhalf[k])[0]
dicttert[name][k] = np.sum(convtemp[indxpixl]) * mdencrit * gdat.apix * adishost**2
if name[:4] == 'frac':
masshosttotl = 0.
for e in gmod.indxsersfgrd:
masshosttotl += dicttert['masshostisf%d' % e + name[-4:]][k]
if masshosttotl != 0.:
dicttert['fracsubh' + name[8:]][k] = dicttert['masssubh' + name[8:]][k] / masshosttotl
setattr(gmodstat, name, dicttert[name])
# interpolate the host, subhalo masses and subhalo mass fraction at the Einstein radius and save it as a scalar variable
dicttert[name + 'bein'] = np.interp(beinhost, gdat.meanpara.anglhalf, dicttert[name])
setattr(gmodstat, name + 'bein', dicttert[name + 'bein'])
#if gmod.numbparaelem > 0:
# ## copy element parameters to the global object
# feat = [[] for l in gmod.indxpopl]
# for l in gmod.indxpopl:
# feat[l] = dict()
# for strgfeat in gmod.namepara.genrelem[l]:
# if strgfeat[:-4] == 'etag':
# continue
# if len(gmodstat.dictelem[l][strgfeat]) > 0:
# if strgmodl == 'true':
# shap = list(np.ones(gmodstat.dictelem[l][strgfeat].ndim, dtype=int))
# feat[l][strgfeat] = np.tile(gmodstat.dictelem[l][strgfeat], [3] + shap)
# if strgmodl == 'fitt':
# feat[l][strgfeat] = gmodstat.dictelem[l][strgfeat]
#
# #for strgfeat in gmod.namepara.elem:
# # feattemp = [[] for l in gmod.indxpopl]
# # for l in gmod.indxpopl:
# # if strgfeat in gmod.namepara.genrelem[l]:
# # if strgfeat in feat[l]:
# # feattemp[l] = feat[l][strgfeat]
# # else:
# # feattemp[l] = np.array([])
# # setattr(gmodstat, strgfeat, feattemp)
# copy true state to the reference state
#if strgmodl == 'true':
# for name, valu in deepcopy(gdat.__dict__).items():
# if name.startswith('true'):
# #indx = name.find('pop')
# #if indx != -1 and not name.endswith('pop') and name[indx+3].isdigit():
# # namerefr = name.replace('pop%s' % name[indx+3], 'ref%s' % name[indx+3])
# #else:
# # namerefr = name
# #namerefr = name
# #namerefr = namerefr.replace('true', 'refr')
# name = name.replace('true', 'refr')
# setattr(gdat, name, valu)
if gmod.numbparaelem > 0 and gdat.priofactdoff != 0.:
if strgmodl == 'true':
for q in gdat.indxrefr:
for strgfeat in gdat.refr.namepara.elem[q]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':
continue
reca = np.zeros(gdat.numbbinsplot) - 1.
indxelempars = np.where(gmodstat.dictelem[q]['deltllik'] > 2.5)[0]
refrhistpars = np.zeros(gdat.numbbinsplot) - 1.
histparaelem = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
indxrefrgood = np.where(histparaelem > 0)[0]
reca[indxrefrgood] = 0.
refrhistpars[indxrefrgood] = 0.
refrhist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % q)
bins = getattr(gdat.binspara, strgfeat)
if len(indxelempars) > 0:
refrhistpars = np.histogram(gmodstat.dictelem[q][strgfeat][indxelempars], bins=bins)[0].astype(float)
if indxrefrgood.size > 0:
reca[indxrefrgood] = refrhistpars[indxrefrgood] / refrhist[indxrefrgood]
setattr(gmodstat, 'histpars' + strgfeat + 'pop%d' % q, refrhistpars)
setattr(gmodstat, 'reca' + strgfeat + 'pop%d' % q, reca)
if gdat.rtagmock is not None:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.genrelem[l]:
if strgfeat == 'spec' or strgfeat == 'specplot' or strgfeat == 'deflprof':# or strgfeat.startswith('aerr'):
continue
if strgfeat in gmod.namepara.genrelem[l]:
hist = getattr(gmodstat, 'hist' + strgfeat + 'pop%d' % l)
reca = getattr(gdat.true.this, 'reca' + strgfeat + 'pop%d' % l)
histcorrreca = hist / reca
setattr(gmodstat, 'histcorrreca' + strgfeat + 'pop%d' % l, histcorrreca)
    ### Exclusive comparison with the true state
if strgmodl == 'fitt' and gdat.typedata == 'mock':
if gmod.boollens:
numbsingcomm = min(deflsing.shape[2], gmod.deflsing.shape[2])
deflsingresi = deflsing[0, ..., :numbsingcomm] - gmod.deflsing[..., :numbsingcomm]
deflsingresimgtd = np.sqrt(np.sum(deflsingresi**2, axis=1))
deflsingresiperc = 100. * deflsingresimgtd / gmod.deflsingmgtd[..., :numbsingcomm]
setattr(gmodstat, 'numbsingcomm', numbsingcomm)
setattr(gmodstat, 'deflsingresi', deflsingresi)
truedeflmgtd = getattr(gdat.true.this, 'deflmgtd')
truedefl = getattr(gdat.true.this, 'defl')
deflresi = defl - truedefl
deflresimgtd = np.sqrt(np.sum(deflresi**2, axis=1))
deflresiperc = 100. * deflresimgtd / truedeflmgtd
setattr(gmodstat, 'deflresi', deflresi)
setattr(gmodstat, 'deflresimgtd', deflresimgtd)
if gmod.numbparaelem > 0:
trueconvelem = getattr(gdat.true.this, 'convelem')
convelemresi = convelem[:] - trueconvelem
convelemresiperc = 100. * convelemresi / trueconvelem
setattr(gmodstat, 'convelemresi', convelemresi)
setattr(gmodstat, 'convelemresiperc', convelemresiperc)
truemagn = getattr(gdat.true.this, 'magn')
magnresi = magn[:] - truemagn
magnresiperc = 100. * magnresi / truemagn
setattr(gmodstat, 'magnresi', magnresi)
setattr(gmodstat, 'magnresiperc', magnresiperc)
if gmod.numbparaelem > 0:
# correlate the catalog sample with the reference catalog
if gdat.boolinforefr and not (strgmodl == 'true' and gdat.typedata == 'mock') and gdat.boolasscrefr:
for q in gdat.indxrefr:
for l in gmod.indxpopl:
if gdat.refr.numbelem[q] > 0:
cmpl = np.array([float(len(indxelemrefrasschits[q][l])) / gdat.refr.numbelem[q]])
if gdat.booldiagmode:
if cmpl > 1. or cmpl < 0.:
raise Exception('')
else:
cmpl = np.array([-1.])
setattr(gmodstat, 'cmplpop%dpop%d' % (l, q), cmpl)
if gmodstat.numbelem[l] > 0:
fdis = np.array([float(indxelemfittasscfals[q][l].size) / gmodstat.numbelem[l]])
if gdat.booldiagmode:
if fdis > 1. or fdis < 0.:
raise Exception('')
else:
fdis = np.array([-1.])
setattr(gmodstat, 'fdispop%dpop%d' % (q, l), fdis)
# collect the associated fitting element parameter for each reference element
featrefrassc = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
for q in gdat.indxrefr:
for l in gmod.indxpopl:
featrefrassc[q][l] = dict()
for strgfeat in gdat.refr.namepara.elem[q]:
if not strgfeat in gmod.namepara.genrelem[l] or strgfeat in gdat.refr.namepara.elemonly[q][l]:
continue
if isinstance(gmodstat.dictelem[l][strgfeat], np.ndarray) and gmodstat.dictelem[l][strgfeat].ndim > 1:
continue
featrefrassc[q][l][strgfeat] = np.zeros(gdat.refr.numbelem[q]) + np.nan
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][strgfeat]) > 0:
featrefrassc[q][l][strgfeat][indxelemrefrasschits[q][l]] = gmodstat.dictelem[l][strgfeat][indxelemfittasschits[q][l]]
name = strgfeat + 'asscpop%dpop%d' % (q, l)
setattr(gmodstat, name, featrefrassc[q][l][strgfeat])
# completeness
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
l = gdat.refr.indxpoplfittassc[q]
for nameparaelemfrst in gdat.refr.namepara.elem[q]:
if nameparaelemfrst.startswith('etag'):
continue
if nameparaelemfrst == 'spec' or nameparaelemfrst == 'specplot':
continue
refrfeatfrst = gdat.refr.dictelem[q][nameparaelemfrst][0, :]
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gdat.refr.namepara.elem[q]:
if nameparaelemfrst == nameparaelemseco:
continue
if nameparaelemseco.startswith('etag'):
continue
if nameparaelemseco == 'spec' or nameparaelemseco == 'specplot':
continue
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the cmpl np.array should depend on strgmodl
cmpltdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot)) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeattdim = getattr(gdat.refr, 'hist%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q))
refrfeatseco = gdat.refr.dictelem[q][nameparaelemseco][0, :]
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
refrhistfeattdimassc = np.histogram2d(refrfeatfrst[indxelemrefrasschits[q][l]], \
refrfeatseco[indxelemrefrasschits[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(refrhistfeattdim != 0.)
if indxgood[0].size > 0:
cmpltdim[indxgood] = refrhistfeattdimassc[indxgood].astype(float) / refrhistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((cmpltdim[indxgood] > 1.) | (cmpltdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%s%spop%d' % (nameparaelemfrst, nameparaelemseco, q), cmpltdim)
cmplfrst = np.zeros(gdat.numbbinsplot) - 1.
if len(indxelemrefrasschits[q][l]) > 0:
refrhistfeatfrst = getattr(gdat.refr, 'hist' + nameparaelemfrst + 'pop%d' % q)
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
refrhistfeatfrstassc = np.histogram(refrfeatfrst[indxelemrefrasschits[q][l]], bins=binsfeatfrst)[0]
indxgood = np.where(refrhistfeatfrst != 0.)[0]
if indxgood.size > 0:
cmplfrst[indxgood] = refrhistfeatfrstassc[indxgood].astype(float) / refrhistfeatfrst[indxgood]
if gdat.booldiagmode:
if np.where((cmplfrst[indxgood] > 1.) | (cmplfrst[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'cmpl%spop%d' % (nameparaelemfrst, q), cmplfrst)
# false discovery rate
for l in gmod.indxpopl:
q = gmod.indxpoplrefrassc[l]
for nameparaelemfrst in gmod.namepara.elem[l]:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
for nameparaelemseco in gmod.namepara.elem[l]:
if not checstrgfeat(nameparaelemfrst, nameparaelemseco):
continue
# temp -- the size of the fdis np.array should depend on strgmodl
fdistdim = np.zeros((gdat.numbbinsplot, gdat.numbbinsplot))
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemseco]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
strgfeattdim = nameparaelemfrst + nameparaelemseco + 'pop%d' % l
fitthistfeattdim = getattr(gmodstat, 'hist' + strgfeattdim)
binsfeatseco = getattr(gdat.binspara, nameparaelemseco)
fitthistfeattdimfals = np.histogram2d(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], \
gmodstat.dictelem[l][nameparaelemseco][indxelemfittasscfals[q][l]], bins=(binsfeatfrst, binsfeatseco))[0]
indxgood = np.where(fitthistfeattdim != 0.)
if indxgood[0].size > 0:
fdistdim[indxgood] = fitthistfeattdimfals[indxgood].astype(float) / fitthistfeattdim[indxgood]
if gdat.booldiagmode:
if np.where((fdistdim[indxgood] > 1.) | (fdistdim[indxgood] < 0.))[0].size > 0:
raise Exception('')
setattr(gmodstat, 'fdis%s%spop%d' % (nameparaelemfrst, nameparaelemseco, l), fdistdim)
fdisfrst = np.zeros(gdat.numbbinsplot)
if len(indxelemrefrasschits[q][l]) > 0 and len(gmodstat.dictelem[l][nameparaelemfrst]) > 0:
binsfeatfrst = getattr(gdat.binspara, nameparaelemfrst)
fitthistfeatfrstfals = np.histogram(gmodstat.dictelem[l][nameparaelemfrst][indxelemfittasscfals[q][l]], bins=binsfeatfrst)[0]
fitthistfeatfrst = getattr(gmodstat, 'hist' + nameparaelemfrst + 'pop%d' % l)
indxgood = np.where(fitthistfeatfrst != 0.)[0]
if indxgood.size > 0:
fdisfrst[indxgood] = fitthistfeatfrstfals[indxgood].astype(float) / fitthistfeatfrst[indxgood]
if gdat.booldiagmode:
                            if np.where((fdisfrst[indxgood] > 1.) | (fdisfrst[indxgood] < 0.))[0].size > 0:
                                raise Exception('')
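
# ---------------------------------------------------------------------------
# Standalone sketch (illustration only, not part of the pipeline above): the
# completeness calculation reduces to a per-bin ratio of an "associated"
# histogram to the reference histogram, with -1 marking empty reference bins.
# All names and numbers below are hypothetical.
import numpy as np

rng = np.random.default_rng(0)
bins = np.linspace(0., 1., 11)
refrfeat = rng.uniform(0., 1., 200)             # reference-element feature values
indxassc = rng.choice(200, 150, replace=False)  # indices of associated elements
refrhist = np.histogram(refrfeat, bins=bins)[0]
asschist = np.histogram(refrfeat[indxassc], bins=bins)[0]
cmpl = np.zeros(bins.size - 1) - 1.
indxgood = np.where(refrhist > 0)[0]
cmpl[indxgood] = asschist[indxgood].astype(float) / refrhist[indxgood]
assert np.all((cmpl[indxgood] >= 0.) & (cmpl[indxgood] <= 1.))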
from typing import Any, Dict, Union
import numpy as np
import panda_gym
from panda_gym.envs.core import Task
from panda_gym.utils import distance
class ReachBimanual(Task):
def __init__(
self,
sim,
get_ee_position0,
get_ee_position1,
reward_type="sparse",
distance_threshold=0.05,
goal_range=0.35,
has_object = False,
absolute_pos = False,
obj_not_in_hand_rate = 1,
) -> None:
super().__init__(sim)
self.has_object = has_object
self.absolute_pos = absolute_pos
self.object_size = 0.04
self.reward_type = reward_type
self.distance_threshold = distance_threshold
self.obj_not_in_hand_rate = obj_not_in_hand_rate
self.get_ee_position0 = get_ee_position0
self.get_ee_position1 = get_ee_position1
self.goal_range_low = np.array([goal_range / 4, goal_range / 4, -goal_range/1.5])
self.goal_range_high = np.array([goal_range, goal_range, goal_range/1.5])
obj_xyz_range=[0.3, 0.3, 0]
self.obj_range_low = np.array([0.1, -obj_xyz_range[1] / 2, self.object_size/2])
self.obj_range_high = np.array(obj_xyz_range) + self.obj_range_low
with self.sim.no_rendering():
self._create_scene()
self.sim.place_visualizer(target_position=np.zeros(3), distance=0.9, yaw=45, pitch=-30)
self._max_episode_steps = 50
def _create_scene(self) -> None:
self.sim.create_plane(z_offset=-0.4)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=-0.575)
self.sim.create_table(length=1., width=0.7, height=0.4, x_offset=0.575)
self.sim.create_sphere(
body_name="target0",
radius=0.02,
mass=0.0,
ghost=True,
position=np.zeros(3),
rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
)
self.sim.create_sphere(
body_name="target1",
radius=0.02,
mass=0.0,
ghost=True,
            position=np.zeros(3),
            rgba_color=np.array([0.1, 0.1, 0.9, 0.3]),  # assumed: a distinct color for the second target
        )
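
    # Illustrative sketch (not in the original excerpt): panda_gym tasks
    # typically expose a compute_reward() with this sparse/dense pattern; the
    # exact signature assumed here follows that convention.
    def compute_reward(self, achieved_goal, desired_goal, info: Dict[str, Any]) -> Union[np.ndarray, float]:
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            # -1 until within distance_threshold of the goal, 0 afterwards
            return -np.array(d > self.distance_threshold, dtype=np.float64)
        else:
            return -d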
#!/usr/bin/env python
u"""
radial_basis.py
Written by <NAME> (01/2022)
Interpolates data using radial basis functions
CALLING SEQUENCE:
ZI = radial_basis(xs, ys, zs, XI, YI, polynomial=0,
smooth=smooth, epsilon=epsilon, method='inverse')
INPUTS:
xs: scaled input X data
ys: scaled input Y data
zs: input data
XI: scaled grid X for output ZI
YI: scaled grid Y for output ZI
OUTPUTS:
ZI: interpolated data grid
OPTIONS:
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
default is mean Euclidean distance
polynomial: polynomial order if augmenting radial basis functions
default None: no polynomials
method: radial basis function
multiquadric
inverse_multiquadric or inverse (default)
inverse_quadratic
gaussian
linear (first-order polyharmonic spline)
cubic (third-order polyharmonic spline)
quintic (fifth-order polyharmonic spline)
thin_plate: thin-plate spline
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
REFERENCES:
<NAME>, Multiquadric equations of topography and other irregular
surfaces, J. Geophys. Res., 76(8), 1905-1915, 1971.
<NAME>, "Radial Basis Functions", Cambridge Monographs on Applied and
Computational Mathematics, 2003.
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 07/2021: using scipy spatial distance routines
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 01/2017: epsilon in polyharmonic splines (linear, cubic, quintic)
Updated 08/2016: using format text within ValueError, edit constant vector
added low-order polynomial option (previously used default constant)
Updated 01/2016: new hierarchical_radial_basis function
that first reduces to points within distance. added cutoff option
Updated 10/2014: added third dimension (spherical)
Written 08/2014
"""
from __future__ import print_function, division
import numpy as np
import scipy.spatial
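#-- Usage sketch (hypothetical data; complements the CALLING SEQUENCE above):
#-- xs, ys = np.random.rand(2, 100)
#-- zs = np.sin(2.0*np.pi*xs)*np.cos(2.0*np.pi*ys)
#-- XI, YI = np.meshgrid(np.linspace(0., 1., 50), np.linspace(0., 1., 50))
#-- ZI = radial_basis(xs, ys, zs, XI, YI, method='thin_plate', smooth=1e-3)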
def radial_basis(xs, ys, zs, XI, YI, smooth=0.0, metric='euclidean',
epsilon=None, method='inverse', polynomial=None):
"""
Interpolates data using radial basis functions
Arguments
---------
xs: scaled input x-coordinates
ys: scaled input y-coordinates
zs: input data
XI: scaled output x-coordinates for data grid
YI: scaled output y-coordinates for data grid
Keyword arguments
-----------------
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
method: radial basis function
- multiquadric
- inverse_multiquadric or inverse (default)
- inverse_quadratic
- gaussian
- linear (first-order polyharmonic spline)
- cubic (third-order polyharmonic spline)
- quintic (fifth-order polyharmonic spline)
- thin_plate: thin-plate spline
polynomial: polynomial order if augmenting radial basis functions
Returns
-------
ZI: interpolated data grid
"""
#-- remove singleton dimensions
xs = np.squeeze(xs)
ys = np.squeeze(ys)
zs = np.squeeze(zs)
XI = np.squeeze(XI)
YI = np.squeeze(YI)
#-- size of new matrix
if (np.ndim(XI) == 1):
nx = len(XI)
else:
nx,ny = np.shape(XI)
#-- Check to make sure sizes of input arguments are correct and consistent
if (len(zs) != len(xs)) | (len(zs) != len(ys)):
raise Exception('Length of X, Y, and Z must be equal')
if (np.shape(XI) != np.shape(YI)):
raise Exception('Size of XI and YI must be equal')
#-- create python dictionary of radial basis function formulas
radial_basis_functions = {}
radial_basis_functions['multiquadric'] = multiquadric
radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric
radial_basis_functions['inverse'] = inverse_multiquadric
radial_basis_functions['inverse_quadratic'] = inverse_quadratic
radial_basis_functions['gaussian'] = gaussian
radial_basis_functions['linear'] = poly_spline1
radial_basis_functions['cubic'] = poly_spline3
radial_basis_functions['quintic'] = poly_spline5
radial_basis_functions['thin_plate'] = thin_plate
#-- check if formula name is listed
if method in radial_basis_functions.keys():
RBF = radial_basis_functions[method]
else:
raise ValueError("Method {0} not implemented".format(method))
#-- Creation of data distance matrix
#-- Data to Data
if (metric == 'brute'):
#-- use linear algebra to compute euclidean distances
Rd = distance_matrix(
np.array([xs, ys]),
np.array([xs, ys])
)
else:
#-- use scipy spatial distance routines
Rd = scipy.spatial.distance.cdist(
            np.array([xs, ys]).T,  # observations as rows, as cdist expects
            np.array([xs, ys]).T,
            metric=metric)
"""
Data preproc functions:
adjust_to_see: adjust image to better visualize (rotate and transpose)
augmentation: apply variations to a list of images
normalization: apply normalization and variations on images (if required)
encode_ctc: encode batch of texts in sparse array with padding
standardize_texts: standardize batch of texts
preproc: main function to the preprocess.
Make the image:
illumination_compensation: apply illumination regularitation
remove_cursive_style: remove cursive style from image (if necessary)
sauvola: apply sauvola binarization
"""
import unicodedata
import numpy as np
import string
import cv2
def adjust_to_see(img):
"""Rotate and transpose to image visualize (cv2 method or jupyter notebook)"""
(h, w) = img.shape[:2]
(cX, cY) = (w // 2, h // 2)
M = cv2.getRotationMatrix2D((cX, cY), -90, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
img = cv2.warpAffine(img, M, (nW + 1, nH + 1))
img = cv2.warpAffine(img.transpose(), M, (nW, nH))
return img
def augmentation(imgs,
dilate_range=1,
erode_range=1,
height_shift_range=0,
rotation_range=0,
scale_range=0,
width_shift_range=0):
"""Apply variations to a list of images (rotate, width and height shift, scale, erode, dilate)"""
imgs = imgs.astype(np.float32)
_, h, w = imgs.shape
dilate_kernel = np.ones((int(np.random.uniform(1, dilate_range)),), np.uint8)
erode_kernel = np.ones((int(np.random.uniform(1, erode_range)),), np.uint8)
height_shift = np.random.uniform(-height_shift_range, height_shift_range)
rotation = np.random.uniform(-rotation_range, rotation_range)
scale = np.random.uniform(1 - scale_range, 1)
width_shift = np.random.uniform(-width_shift_range, width_shift_range)
trans_map = np.float32([[1, 0, width_shift * w], [0, 1, height_shift * h]])
rot_map = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]
rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]
affine_mat = rot_map_aff.dot(trans_map_aff)[:2, :]
for i in range(len(imgs)):
imgs[i] = cv2.warpAffine(imgs[i], affine_mat, (w, h), flags=cv2.INTER_NEAREST, borderValue=255)
imgs[i] = cv2.erode(imgs[i], erode_kernel, iterations=1)
imgs[i] = cv2.dilate(imgs[i], dilate_kernel, iterations=1)
return imgs
def normalization(imgs):
"""Normalize list of images"""
imgs = imgs.astype(np.float32)
_, h, w = imgs.shape
for i in range(len(imgs)):
m, s = cv2.meanStdDev(imgs[i])
imgs[i] = imgs[i] - m[0][0]
imgs[i] = imgs[i] / s[0][0] if s[0][0] > 0 else imgs[i]
return np.expand_dims(imgs, axis=-1)
def decode_ctc(texts, charset):
"""Decode sparse array (sparse to text)"""
decoded = []
for i in range(len(texts)):
text = "".join([charset[int(c)] for c in texts[i]])
decoded.append(" ".join(text.split()))
return decoded
def encode_ctc(texts, charset, max_text_length):
"""Encode text array (text to sparse)"""
pad_encoded = np.zeros((len(texts), max_text_length))
for i in range(len(texts)):
texts[i] = unicodedata.normalize("NFKD", texts[i]).encode("ASCII", "ignore").decode("ASCII")
texts[i] = " ".join(texts[i].split())
encoded = [float(charset.find(x)) for x in texts[i] if charset.find(x) > -1]
encoded = [float(charset.find("&"))] if len(encoded) == 0 else encoded
pad_encoded[i, 0:len(encoded)] = encoded
return pad_encoded
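
# Example (hypothetical charset): with charset=" abc&" and max_text_length=4,
# encode_ctc(["ab"], " abc&", 4) yields [[1., 2., 0., 0.]]: each character maps
# to its index in the charset and the row is zero-padded.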
def standardize_texts(texts):
"""Organize/add spaces around punctuation marks"""
for i in range(len(texts)):
texts[i] = " ".join(texts[i].split()).replace(" '", "'").replace("' ", "'")
texts[i] = texts[i].replace("«", "").replace("»", "")
for y in texts[i]:
if y in string.punctuation.replace("'", ""):
texts[i] = texts[i].replace(y, f" {y} ")
texts[i] = " ".join(texts[i].split())
return texts
"""
Preprocess metodology based in:
<NAME>, <NAME> and <NAME>,
Word Beam Search: A Connectionist Temporal Classification Decoding Algorithm, in
16th International Conference on Frontiers in Handwriting Recognition, pp. 256-258, 2018.
"""
def preproc(img, img_size):
"""Make the process with the `img_size` to the scale resize"""
wt, ht, _ = img_size
h, w = img.shape
f = max((w / wt), (h / ht))
new_size = (max(min(wt, int(w / f)), 1), max(min(ht, int(h / f)), 1))
img = cv2.resize(img, new_size)
img = illumination_compensation(img)
img = remove_cursive_style(img)
target = np.ones([ht, wt], dtype=np.uint8) * 255
target[0:new_size[1], 0:new_size[0]] = img
img = cv2.transpose(target)
return img
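
# Usage sketch (hypothetical path; img_size is (width, height, channels)):
#   img = cv2.imread("line.png", cv2.IMREAD_GRAYSCALE)
#   img = preproc(img, img_size=(1024, 128, 1))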
"""
Illumination Compensation based in:
<NAME>, <NAME>, <NAME>,
Efficient illumination compensation techniques for text images, in
Digital Signal Processing, 22(5), pp. 726-733, 2012.
"""
def illumination_compensation(img):
"""Illumination compensation technique for text image"""
def scale(img):
s = np.max(img) - np.min(img)
res = img / s
res -= np.min(res)
res *= 255
return res
img = img.astype(np.float32)
height, width = img.shape
sqrt_hw = np.sqrt(height * width)
bins = np.arange(0, 300, 10)
bins[26] = 255
hp = np.histogram(img, bins)
for i in range(len(hp[0])):
if hp[0][i] > sqrt_hw:
hr = i * 10
break
    np.seterr(divide='ignore', invalid='ignore')
"""
Feature extraction
"""
# Author: <NAME> <<EMAIL>>
#
# License: Apache, Version 2.0
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.metrics import adjusted_mutual_info_score
from scipy.special import psi
from scipy.stats.stats import pearsonr
from scipy.stats import skew, kurtosis
from collections import Counter, defaultdict
from multiprocessing import Pool
import pandas as pd
import operator
from .hsic import FastHsicTestGamma
import math
BINARY = "Binary"
CATEGORICAL = "Categorical"
NUMERICAL = "Numerical"
class FeatureMapper:
def __init__(self, features):
self.features = features
def fit(self, X, y=None):
        # transform() simply selects the configured columns, so no fitting is needed
        return self
def transform(self, X):
return X[self.features].values
def fit_transform(self, X, y=None):
return self.transform(X)
def weighted_mean_and_std(values, weights):
"""
Returns the weighted average and standard deviation.
values, weights -- numpy ndarrays with the same shape.
"""
average = np.average(values, weights=weights, axis=0)
variance = np.dot(weights, (values - average) ** 2) / weights.sum() # Fast and numerically precise
return (average, np.sqrt(variance))
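
# Example (hypothetical values): the weight vector doubles the influence of the
# last point, so
#   weighted_mean_and_std(np.array([1., 2., 3.]), np.array([1., 1., 2.]))
# returns (2.25, ~0.829).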
def count_unique(x):
try:
return len(set(x))
except TypeError:
return len(set(x.flat))
def count_unique_ratio(x):
try:
return len(set(x)) / float(len(x))
except TypeError:
return len(set(x.flat))/float(len(x))
def binary(tp):
assert type(tp) is str
return tp == BINARY
def categorical(tp):
assert type(tp) is str
return tp == CATEGORICAL
def numerical(tp):
assert type(tp) is str
return tp == NUMERICAL
def binary_entropy(p, base):
assert p <= 1 and p >= 0
h = -(p * np.log(p) + (1 - p) * np.log(1 - p)) if (p != 0) and (p != 1) else 0
return h / np.log(base)
def discrete_probability(x, tx, ffactor, maxdev):
x = discretized_sequence(x, tx, ffactor, maxdev)
try:
return Counter(x)
except TypeError as e:
return Counter(np.array(x).flat) if isinstance(x, list) else Counter(x.flat)
def discretized_values(x, tx, ffactor, maxdev):
if numerical(tx) and count_unique(x) > (2 * ffactor * maxdev + 1):
vmax = ffactor * maxdev
vmin = -ffactor * maxdev
return range(vmin, vmax + 1)
else:
try:
return sorted(list(set(x)))
except TypeError:
return sorted(list(set(x.flat)))
def len_discretized_values(x, tx, ffactor, maxdev):
return len(discretized_values(x, tx, ffactor, maxdev))
def discretized_sequence(x, tx, ffactor, maxdev, norm=True):
if not norm or (numerical(tx) and count_unique(x) > len_discretized_values(x, tx, ffactor, maxdev)):
if norm:
x = (x - np.mean(x)) / np.std(x)
xf = x[abs(x) < maxdev]
x = (x - np.mean(xf)) / np.std(xf)
x = np.round(x * ffactor)
vmax = ffactor * maxdev
vmin = -ffactor * maxdev
x[x > vmax] = vmax
x[x < vmin] = vmin
return x
def discretized_sequences(x, tx, y, ty, ffactor=3, maxdev=3):
return discretized_sequence(x, tx, ffactor, maxdev), discretized_sequence(y, ty, ffactor, maxdev)
def normalized_error_probability(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
cx = Counter(x)
cy = Counter(y)
except TypeError:
cx = Counter(x.flat)
cy = Counter(y.flat)
nx = len(cx)
ny = len(cy)
pxy = defaultdict(lambda: 0)
try:
for p in zip(x, y):
pxy[p] += 1
except TypeError:
for p in zip(x.flat, y.flat):
pxy[p] += 1
pxy = np.array([[pxy[(a, b)] for b in cy] for a in cx], dtype=float)
pxy = pxy / pxy.sum()
perr = 1 - np.sum(pxy.max(axis=1))
max_perr = 1 - np.max(pxy.sum(axis=0))
pnorm = perr / max_perr if max_perr > 0 else perr
return pnorm
def discrete_entropy(x, tx, ffactor=3, maxdev=3, bias_factor=0.7):
c = discrete_probability(x, tx, ffactor, maxdev)
# print(c, len(c))
pk = np.array(list(c.values()), dtype=float)
pk = pk / pk.sum()
vec = pk * np.log(pk)
S = -np.sum(vec, axis=0)
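    # the second term below is a Miller-Madow-style small-sample bias
    # correction, (m - 1)/(2N) for m occupied bins and N samples, scaled by bias_factor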
return S + bias_factor * (len(pk) - 1) / float(2 * len(list(x)))
def discrete_divergence(cx, cy):
for a, v in cx.most_common():
if cy[a] == 0:
cy[a] = 1
nx = float(sum(cx.values()))
ny = float(sum(cy.values()))
    total = 0.
    for a, v in cx.most_common():
        px = v / nx
        py = cy[a] / ny
        total += px * np.log(px / py)
    return total
def discrete_joint_entropy(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
return discrete_entropy(list(zip(x, y)), CATEGORICAL)
def normalized_discrete_joint_entropy(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
e = discrete_entropy(list(zip(x, y)), CATEGORICAL)
nx = len_discretized_values(x, tx, ffactor, maxdev)
ny = len_discretized_values(y, ty, ffactor, maxdev)
if nx * ny > 0: e = e / np.log(nx * ny)
return e
def discrete_conditional_entropy(x, tx, y, ty):
return discrete_joint_entropy(x, tx, y, ty) - discrete_entropy(y, ty)
def adjusted_mutual_information(x, tx, y, ty, ffactor=3, maxdev=3):
x, y = discretized_sequences(x, tx, y, ty, ffactor, maxdev)
try:
return adjusted_mutual_info_score(x, y)
except ValueError:
return adjusted_mutual_info_score(x.squeeze(1), y.squeeze(1))
def discrete_mutual_information(x, tx, y, ty):
ex = discrete_entropy(x, tx)
ey = discrete_entropy(y, ty)
exy = discrete_joint_entropy(x, tx, y, ty)
mxy = max((ex + ey) - exy,
0) # Mutual information is always positive: max() avoid negative values due to numerical errors
return mxy
def normalized_discrete_entropy(x, tx, ffactor=3, maxdev=3):
e = discrete_entropy(x, tx, ffactor, maxdev)
n = len_discretized_values(x, tx, ffactor, maxdev)
if n > 0: e = e / np.log(n)
return e
# Continuous information measures
def to_numerical(x, y):
dx = defaultdict(lambda: np.zeros(2))
for i, a in enumerate(x):
dx[a][0] += y[i]
dx[a][1] += 1
for a in dx.keys():
dx[a][0] /= dx[a][1]
x = np.array([dx[a][0] for a in x], dtype=float)
return x
def normalize(x, tx):
if not numerical(tx): # reassign labels according to its frequency
try:
cx = Counter(x)
except TypeError:
cx = Counter(x.flat)
xmap = dict()
# nx = len(cx)
# center = nx/2 if (nx % 4) == 0 else (nx-1)//2
# for i, k in enumerate(cx.most_common()):
# offset = (i+1)//2
# if (i % 4) > 1: offset = -offset
# xmap[k[0]] = center + offset
for i, k in enumerate(cx.most_common()):
xmap[k[0]] = i
y = np.array([xmap[a] for a in x.flat], dtype=float)
else:
y = x
y = y - np.mean(y)
    if np.std(y) > 0:
        y = y / np.std(y)
    return y
"""PyFstat search & follow-up classes using MCMC-based methods
The general approach is described in
Ashton & Prix (PRD 97, 103020, 2018):
https://arxiv.org/abs/1802.05450
and we use the `ptemcee` sampler
described in Vousden et al. (MNRAS 455, 1919-1937, 2016):
https://arxiv.org/abs/1501.05823
and based on Foreman-Mackey et al. (PASP 125, 306, 2013):
https://arxiv.org/abs/1202.3665
Defining the prior
##################
The MCMC based searches (i.e. `pyfstat.MCMC*`) require a prior specification for each model parameter,
implemented via a `python dictionary <https://docs.python.org/tutorial/datastructures.html#dictionaries>`_.
This is best explained through a simple example, here is the prior for a *directed* search with a *uniform*
prior on the frequency and a *normal* prior on the frequency derivative:
.. code-block:: python
theta_prior = {'F0': {'type': 'unif',
'lower': 29.9,
'upper': 30.1},
'F1': {'type': 'norm',
'loc': 0,
'scale': 1e-10},
'F2': 0,
'Alpha': 2.3,
'Delta': 1.8
}
For the sky positions ``Alpha`` and ``Delta``, we give the fixed values (i.e. they are considered *known* by
the MCMC simulation), the same is true for ``F2``, the second derivative of the frequency which we fix at ``0``.
Meanwhile, for the frequency ``F0`` and first frequency derivative ``F1`` we give a dictionary specifying their
prior distribution. This dictionary must contain three arguments: the ``type`` (in this case either ``unif`` or
``norm``) which specifies the type of distribution, then two shape arguments. The shape parameters will depend
on the ``type`` of distribution, but here we use ``lower`` and ``upper``, required for the ``unif`` prior while
``loc`` and ``scale`` are required for the ``norm`` prior.
Currently, two other types of prior are implemented: ``halfnorm``, ``neghalfnorm`` (both of which require ``loc``
and ``scale`` shape parameters). Further priors can be added by modifying ``pyfstat.MCMCSearch._generic_lnprior``.
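
A minimal, illustrative search using the prior above could then be set up as
follows (the reference time and SFT file pattern are placeholders, not values
prescribed by this documentation):

.. code-block:: python

    search = pyfstat.MCMCSearch(
        theta_prior=theta_prior,
        tref=1126051217,          # placeholder GPS reference time
        label="example_search",
        outdir="data",
        sftfilepattern="data/*.sft",
        nsteps=[100, 100],
    )
    search.run()
    search.plot_corner()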
"""
import sys
import os
import copy
import logging
from collections import OrderedDict
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from ptemcee import Sampler as PTSampler
import corner
import dill as pickle
from scipy.stats import lognorm
import pyfstat.core as core
from pyfstat.core import BaseSearchClass, tqdm, args
import pyfstat.optimal_setup_functions as optimal_setup_functions
import pyfstat.helper_functions as helper_functions
class MCMCSearch(BaseSearchClass):
"""
MCMC search using ComputeFstat.
Evaluates the coherent F-statistic across a parameter space region
corresponding to an isolated/binary-modulated CW signal.
"""
symbol_dictionary = dict(
F0=r"$f$",
F1=r"$\dot{f}$",
F2=r"$\ddot{f}$",
Alpha=r"$\alpha$",
Delta=r"$\delta$",
asini=r"asini",
period=r"P",
ecc=r"ecc",
tp=r"tp",
argp=r"argp",
)
"""
    Key, val pairs mapping the parameters (`F0`, `F1`, ...) to LaTeX math
    symbols for plots
"""
unit_dictionary = dict(
F0=r"Hz",
F1=r"Hz/s",
F2=r"Hz/s$^2$",
Alpha=r"rad",
Delta=r"rad",
asini="",
period=r"s",
ecc="",
tp=r"s",
argp="",
)
"""
    Key, val pairs mapping the parameters (e.g. `F0`, `F1`) to their
    units (e.g. `Hz`)
"""
transform_dictionary = {}
"""
    Key, val pairs of the parameters (e.g. `F0`, `F1`), where the value is
    itself a dictionary which can contain the items `multiplier`, `subtractor`,
    or `unit` by which to transform the parameter and update its units.
"""
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
RngMedWindow=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
transientWindowType=None,
tCWFstatMapVersion="lal",
earth_ephem=None,
sun_ephem=None,
allowedMismatchFromSFTLength=None,
):
"""
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
For each parameters (key of the dict), if it is to be held fixed
the value should be the constant float, if it is be searched, the
value should be a dictionary of the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
            is required, minStartTime and maxStartTime default to None in which
case all available data is used.
label, outdir: str
A label and output directory (optional, default is `data`) to
name files
sftfilepattern: str, optional
            Pattern to match SFTs using wildcards (`*`, `?`) and ranges [0-9];
            multiple patterns can be given separated by colons.
detectors: str, optional
Two character reference to the detectors to use, specify None for no
            constraint and comma-separated strings for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
            The number of walkers and temperatures to use in the parallel
tempered PTSampler.
log10beta_min: float < 0, optional
            The minimum log_10(beta) value. If given, the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
            A dictionary of distributions from which to draw the
            initial walker positions.
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary orbital parameters.
BSGL: bool, optional
If true, use the BSGL statistic.
        SSBprec: int, optional
            SSB precision to use when calling ComputeFstat. See `core.ComputeFstat`.
RngMedWindow: int, optional
Running-Median window size (number of bins) for ComputeFstat. See `core.ComputeFstat`.
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput. See `core.ComputeFstat`.
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search. See `core.ComputeFstat`.
assumeSqrtSX: float or list or str
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}.
See `core.ComputeFstat`.
transientWindowType: str
If 'rect' or 'exp',
compute atoms so that a transient (t0,tau) map can later be computed.
('none' instead of None explicitly calls the transient-window function,
but with the full range, for debugging). See `core.ComputeFstat`.
Currently only supported for nsegs=1.
tCWFstatMapVersion: str
Choose between standard 'lal' implementation,
'pycuda' for gpu, and some others for devel/debug.
allowedMismatchFromSFTLength: float
Maximum allowed mismatch from SFTs being too long
[Default: what's hardcoded in XLALFstatMaximumSFTLength].
"""
self._set_init_params_dict(locals())
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.RngMedWindow = RngMedWindow
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.transientWindowType = transientWindowType
self.tCWFstatMapVersion = tCWFstatMapVersion
self.set_ephemeris_files(earth_ephem, sun_ephem)
self.allowedMismatchFromSFTLength = allowedMismatchFromSFTLength
os.makedirs(outdir, exist_ok=True)
self.output_file_header = self.get_output_file_header()
self._add_log_file(self.output_file_header)
logging.info("Set-up MCMC search for model {}".format(self.label))
if sftfilepattern:
logging.info("Using data {}".format(self.sftfilepattern))
else:
logging.info("No sftfilepattern given")
if injectSources:
logging.info("Inject sources: {}".format(injectSources))
self.pickle_path = os.path.join(self.outdir, self.label + "_saved_data.p")
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._set_likelihoodcoef()
self._log_input()
def _set_likelihoodcoef(self):
"""Additional constant terms to turn a detection statistic into a likelihood.
In general, the (log-)likelihood can be obtained from the signal-to-noise
(log-)Bayes factor
(omitting the overall Gaussian-noise normalization term)
but the detection statistic may only be a monotonic function of the
Bayes factor, not the full thing.
E.g. this is the case for the standard CW F-statistic!
"""
if self.BSGL:
# In this case, the corresponding term is already included
# in the detection statistic itself.
# See Eq. (36) in Keitel et al (PRD 89, 064023, 2014):
# https://arxiv.org/abs/1311.5738
# where Fstar0 = ln(cstar) = ln(rhohatmax**4/70).
# We just need to switch to natural log basis.
self.likelihooddetstatmultiplier = np.log(10)
self.likelihoodcoef = 0
else:
# If assuming only Gaussian noise + signal,
# the likelihood is essentially the F-statistic,
# but with an extra constant term depending on the amplitude prior.
# See Eq. (9) of Ashton & Prix (PRD 97, 103020, 2018):
# https://arxiv.org/abs/1802.05450
# Also need to go from twoF to F.
self.likelihooddetstatmultiplier = 0.5
self.likelihoodcoef = np.log(70.0 / self.rhohatmax ** 4)
def _log_input(self):
logging.info("theta_prior = {}".format(self.theta_prior))
logging.info("nwalkers={}".format(self.nwalkers))
logging.info("nsteps = {}".format(self.nsteps))
logging.info("ntemps = {}".format(self.ntemps))
logging.info("log10beta_min = {}".format(self.log10beta_min))
def _get_search_ranges(self):
"""take prior widths as proxy "search ranges" to allow covering band estimate"""
if (self.minCoverFreq is None) or (self.maxCoverFreq is None):
normal_stds = 3 # this might not always be enough
prior_bounds, norm_trunc_warn = self._get_prior_bounds(normal_stds)
if norm_trunc_warn:
logging.warning(
"Gaussian priors (normal / half-normal) have been truncated"
" at {:f} standard deviations for estimating the coverage"
" frequency band. If sampling fails at any point, please"
" consider manually setting [minCoverFreq,maxCoverFreq] to"
" more generous values.".format(normal_stds)
)
# first start with parameters that have non-delta prior ranges
search_ranges = {
key: [bound["lower"], bound["upper"]]
for key, bound in prior_bounds.items()
}
# then add fixed-point (delta prior) parameters
for key in self.theta_prior:
if key not in self.theta_keys:
search_ranges[key] = [self.theta_prior[key]]
return search_ranges
else:
return None
def _initiate_search_object(self):
logging.info("Setting up search object")
search_ranges = self._get_search_ranges()
self.search = core.ComputeFstat(
tref=self.tref,
sftfilepattern=self.sftfilepattern,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
search_ranges=search_ranges,
detectors=self.detectors,
BSGL=self.BSGL,
transientWindowType=self.transientWindowType,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
binary=self.binary,
injectSources=self.injectSources,
assumeSqrtSX=self.assumeSqrtSX,
SSBprec=self.SSBprec,
RngMedWindow=self.RngMedWindow,
tCWFstatMapVersion=self.tCWFstatMapVersion,
earth_ephem=self.earth_ephem,
sun_ephem=self.sun_ephem,
allowedMismatchFromSFTLength=self.allowedMismatchFromSFTLength,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def _logp(self, theta_vals, theta_prior, theta_keys, search):
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def _set_point_for_evaluation(self, theta):
"""Combines fixed and variable parameters to form a valid evaluation point.
Parameters
----------
theta: list or np.ndarray
The sampled (variable) parameters.
Returns
-------
p: list
The full parameter space point as a list.
"""
p = copy.copy(self.fixed_theta)
for j, theta_i in enumerate(self.theta_idxs):
p[theta_i] = theta[j]
return p
def _logl(self, theta, search):
in_theta = self._set_point_for_evaluation(theta)
detstat = search.get_det_stat(*in_theta)
return detstat * self.likelihooddetstatmultiplier + self.likelihoodcoef
def _unpack_input_theta(self):
self.full_theta_keys = ["F0", "F1", "F2", "Alpha", "Delta"]
if self.binary:
self.full_theta_keys += ["asini", "period", "ecc", "tp", "argp"]
full_theta_keys_copy = copy.copy(self.full_theta_keys)
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in self.full_theta_keys]
self.theta_idxs = [self.full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [self.symbol_dictionary[k] for k in self.theta_keys]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
self.output_keys = self.theta_keys.copy()
self.output_keys.append("twoF")
if self.BSGL:
self.output_keys.append("log10BSGL")
def _evaluate_logpost(self, p0vec):
init_logp = np.array(
[
self._logp(p, self.theta_prior, self.theta_keys, self.search)
for p in p0vec
]
)
init_logl = np.array([self._logl(p, self.search) for p in p0vec])
return init_logl + init_logp
def _check_initial_points(self, p0):
for nt in range(self.ntemps):
logging.info("Checking temperature {} chains".format(nt))
num = sum(self._evaluate_logpost(p0[nt]) == -np.inf)
if num > 0:
logging.warning(
"Of {} initial values, {} are -np.inf due to the prior".format(
len(p0[0]), num
)
)
p0 = self._generate_new_p0_to_fix_initial_points(p0, nt)
def _generate_new_p0_to_fix_initial_points(self, p0, nt):
logging.info("Attempting to correct intial values")
init_logpost = self._evaluate_logpost(p0[nt])
idxs = np.arange(self.nwalkers)[init_logpost == -np.inf]
count = 0
while sum(init_logpost == -np.inf) > 0 and count < 100:
for j in idxs:
p0[nt][j] = p0[nt][np.random.randint(0, self.nwalkers)] * (
1 + np.random.normal(0, 1e-10, self.ndim)
)
init_logpost = self._evaluate_logpost(p0[nt])
count += 1
if sum(init_logpost == -np.inf) > 0:
logging.info("Failed to fix initial priors")
else:
logging.info("Suceeded to fix initial priors")
return p0
def setup_initialisation(self, nburn0, scatter_val=1e-10):
"""Add an initialisation step to the MCMC run
        If called prior to `run()`, adds an initial step in which the MCMC
simulation is run for `nburn0` steps. After this, the MCMC simulation
continues in the usual manner (i.e. for nburn and nprod steps), but the
walkers are reset scattered around the maximum likelihood position
of the initialisation step.
Parameters
----------
nburn0: int
Number of initialisation steps to take.
scatter_val: float
Relative number to scatter walkers around the maximum likelihood
position after the initialisation step. If the maximum likelihood
point is located at `p`, the new walkers are randomly drawn from a
multivariate gaussian distribution centered at `p` with standard
deviation `diag(scatter_val * p)`.
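
        Example (illustrative)::

            search.setup_initialisation(nburn0=50, scatter_val=1e-10)
            search.run()

        which prepends 50 initialisation steps to `self.nsteps` before the
        burn-in and production stages.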
"""
logging.info(
"Setting up initialisation with nburn0={}, scatter_val={}".format(
nburn0, scatter_val
)
)
self.nsteps = [nburn0] + self.nsteps
self.scatter_val = scatter_val
def _run_sampler(self, p0, nprod=0, nburn=0, window=50):
for result in tqdm(
self.sampler.sample(p0, iterations=nburn + nprod), total=nburn + nprod
):
pass
self.mean_acceptance_fraction = np.mean(
self.sampler.acceptance_fraction, axis=1
)
logging.info(
"Mean acceptance fraction: {}".format(self.mean_acceptance_fraction)
)
if self.ntemps > 1:
self.tswap_acceptance_fraction = self.sampler.tswap_acceptance_fraction
logging.info(
"Tswap acceptance fraction: {}".format(
self.sampler.tswap_acceptance_fraction
)
)
self.autocorr_time = self.sampler.get_autocorr_time(window=window)
logging.info("Autocorrelation length: {}".format(self.autocorr_time))
def _estimate_run_time(self):
"""Print the estimated run time
Uses timing coefficients based on a Lenovo T460p Intel(R)
Core(TM) i5-6300HQ CPU @ 2.30GHz.
"""
# Todo: add option to time on a machine, and move coefficients to
# ~/.pyfstat.conf
if (
type(self.theta_prior["Alpha"]) == dict
or type(self.theta_prior["Delta"]) == dict
):
tau0LD = 5.2e-7
tau0T = 1.5e-8
tau0S = 1.2e-4
tau0C = 5.8e-6
else:
tau0LD = 1.3e-7
tau0T = 1.5e-8
tau0S = 9.1e-5
tau0C = 5.5e-6
Nsfts = (self.maxStartTime - self.minStartTime) / 1800.0
if hasattr(self, "run_setup"):
ts = []
for row in self.run_setup:
nsteps = row[0]
nsegs = row[1]
numb_evals = np.sum(nsteps) * self.nwalkers * self.ntemps
t = (tau0S + tau0LD * Nsfts) * numb_evals
if nsegs > 1:
t += (tau0C + tau0T * Nsfts) * nsegs * numb_evals
ts.append(t)
time = np.sum(ts)
else:
numb_evals = np.sum(self.nsteps) * self.nwalkers * self.ntemps
time = (tau0S + tau0LD * Nsfts) * numb_evals
if getattr(self, "nsegs", 1) > 1:
time += (tau0C + tau0T * Nsfts) * self.nsegs * numb_evals
logging.info(
"Estimated run-time = {} s = {:1.0f}:{:1.0f} m".format(
time, *divmod(time, 60)
)
)
def run(
self,
proposal_scale_factor=2,
save_pickle=True,
export_samples=True,
save_loudest=True,
plot_walkers=True,
walker_plot_args=None,
window=50,
):
"""Run the MCMC simulatation
Parameters
----------
proposal_scale_factor: float
The proposal scale factor `a > 1` used by the sampler.
See <NAME> (Comm App Math Comp Sci, Vol 5, No. 1, 2010): 10.2140/camcos.2010.5.65.
The bigger the value, the wider the range to draw proposals from.
If the acceptance fraction is too low, you can raise it by
decreasing the `a` parameter; and if it is too high, you can reduce
it by increasing the `a` parameter.
            See Foreman-Mackey et al. (PASP 125, 306, 2013): https://arxiv.org/abs/1202.3665.
save_pickle: bool
If true, save a pickle file of the full sampler state.
export_samples: bool
If true, save ASCII samples file to disk. See `MCMCSearch.export_samples_to_disk`.
save_loudest: bool
If true, save a CFSv2 .loudest file to disk. See `MCMCSearch.generate_loudest`.
plot_walkers: bool
If true, save trace plots of the walkers.
walker_plot_args:
Dictionary passed as kwargs to _plot_walkers to control the plotting.
Histogram of sampled detection statistic values can be retrieved setting "plot_det_stat" to `True`.
Parameters corresponding to an injected signal can be passed through "injection_parameters"
as a dictionary containing the parameters of said signal. All parameters being searched for must
be present, otherwise this option is ignored.
If both "fig" and "axes" entries are set, the plot is not saved to disk
directly, but (fig, axes) are returned.
window: int
The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see
            ptemcee.Sampler.get_autocorr_time for further details).
"""
self._initiate_search_object()
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
if self.old_data_is_okay_to_use is True:
logging.warning("Using saved data from {}".format(self.pickle_path))
d = self.get_saved_data_dictionary()
self.samples = d["samples"]
self.lnprobs = d["lnprobs"]
self.lnlikes = d["lnlikes"]
self.all_lnlikelihood = d["all_lnlikelihood"]
self.chain = d["chain"]
return
self._estimate_run_time()
walker_plot_args = walker_plot_args or {}
self.sampler = PTSampler(
ntemps=self.ntemps,
nwalkers=self.nwalkers,
dim=self.ndim,
logl=self._logl,
logp=self._logp,
logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,),
betas=self.betas,
a=proposal_scale_factor,
)
p0 = self._generate_initial_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
# Run initialisation steps if required
ninit_steps = len(self.nsteps) - 2
for j, n in enumerate(self.nsteps[:-2]):
logging.info(
"Running {}/{} initialisation with {} steps".format(j, ninit_steps, n)
)
self._run_sampler(p0, nburn=n, window=window)
if plot_walkers:
try:
walker_fig, walker_axes = self._plot_walkers(**walker_plot_args)
walker_fig.tight_layout()
walker_fig.savefig(
os.path.join(
self.outdir, "{}_init_{}_walkers.png".format(self.label, j)
)
)
plt.close(walker_fig)
except Exception as e:
logging.warning(
"Failed to plot initialisation walkers due to Error {}".format(
e
)
)
p0 = self._get_new_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
self.sampler.reset()
if len(self.nsteps) > 1:
nburn = self.nsteps[-2]
else:
nburn = 0
nprod = self.nsteps[-1]
logging.info("Running final burn and prod with {} steps".format(nburn + nprod))
self._run_sampler(p0, nburn=nburn, nprod=nprod)
samples = self.sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = self.sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = self.sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = self.sampler.loglikelihood[:, :, nburn:]
self.samples = samples
self.chain = self.sampler.chain
self.lnprobs = lnprobs
self.lnlikes = lnlikes
self.all_lnlikelihood = all_lnlikelihood
if save_pickle:
self._pickle_data(samples, lnprobs, lnlikes, all_lnlikelihood)
if export_samples:
self.export_samples_to_disk()
if save_loudest:
self.generate_loudest()
if plot_walkers:
try:
walkers_fig, walkers_axes = self._plot_walkers(
nprod=nprod, **walker_plot_args
)
walkers_fig.tight_layout()
except Exception as e:
logging.warning("Failed to plot walkers due to Error {}".format(e))
if (walker_plot_args.get("fig") is not None) and (
walker_plot_args.get("axes") is not None
):
self.walker_fig = walkers_fig
self.walker_axes = walkers_axes
else:
try:
walkers_fig.savefig(
os.path.join(self.outdir, self.label + "_walkers.png")
)
plt.close(walkers_fig)
except Exception as e:
logging.warning(
"Failed to save walker plots due to Error {}".format(e)
)
def _get_rescale_multiplier_for_key(self, key):
"""Get the rescale multiplier from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent
        in which case 1 is returned
"""
if key not in self.transform_dictionary:
return 1
if "multiplier" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["multiplier"]
if type(val) == str:
if hasattr(self, val):
multiplier = getattr(
self, self.transform_dictionary[key]["multiplier"]
)
else:
raise ValueError("multiplier {} not a class attribute".format(val))
else:
multiplier = val
else:
multiplier = 1
return multiplier
def _get_rescale_subtractor_for_key(self, key):
"""Get the rescale subtractor from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent
in which case 0 is returned
"""
if key not in self.transform_dictionary:
return 0
if "subtractor" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["subtractor"]
if type(val) == str:
if hasattr(self, val):
subtractor = getattr(
self, self.transform_dictionary[key]["subtractor"]
)
else:
raise ValueError("subtractor {} not a class attribute".format(val))
else:
subtractor = val
else:
subtractor = 0
return subtractor
def _scale_samples(self, samples, theta_keys):
"""Scale the samples using the transform_dictionary"""
for key in theta_keys:
if key in self.transform_dictionary:
idx = theta_keys.index(key)
s = samples[:, idx]
subtractor = self._get_rescale_subtractor_for_key(key)
s = s - subtractor
multiplier = self._get_rescale_multiplier_for_key(key)
s *= multiplier
samples[:, idx] = s
return samples
def _get_labels(self, newline_units=False):
"""Combine the units, symbols and rescaling to give labels"""
labels = []
for key in self.theta_keys:
values = self.transform_dictionary.get(key, {})
s, label, u = [
values.get(slu_key, None) for slu_key in ["symbol", "label", "unit"]
]
if label is None:
s = s or self.symbol_dictionary[key].replace(
"_{glitch}", r"_\mathrm{glitch}"
)
u = u or self.unit_dictionary[key]
label = (
f"{s}"
+ ("\n" if newline_units else " ")
+ (f"[{u}]" if u != "" else "")
)
labels.append(label)
return labels
def plot_corner(
self,
figsize=(10, 10),
add_prior=False,
nstds=None,
label_offset=0.4,
dpi=300,
rc_context={},
tglitch_ratio=False,
fig_and_axes=None,
save_fig=True,
**kwargs,
):
"""Generate a corner plot of the posterior
Using the `corner` package (https://pypi.python.org/pypi/corner/),
generate estimates of the posterior from the production samples.
Parameters
----------
figsize: tuple (7, 7)
Figure size in inches (passed to plt.subplots)
add_prior: bool, str
If true, plot the prior as a red line. If 'full' then for uniform
priors plot the full extent of the prior.
nstds: float
The number of standard deviations to plot centered on the median.
Standard deviation is computed from the samples using `numpy.std`.
label_offset: float
Offset the labels from the plot: useful to prevent overlapping the
tick labels with the axis labels. This option is passed to `ax.[x|y]axis.set_label_coords`.
dpi: int
Passed to plt.savefig.
rc_context: dict
Dictionary of rc values to set while generating the figure (see
matplotlib rc for more details).
tglitch_ratio: bool
If true, and tglitch is a parameter, plot posteriors as the
fractional time at which the glitch occurs instead of the actual
time.
fig_and_axes: tuple
(fig, axes) tuple to plot on. The axes must be of the right shape,
namely (ndim, ndim)
save_fig: bool
If true, save the figure, else return the fig, axes.
**kwargs:
Passed to corner.corner. Use "truths" to plot the true parameters of a signal.
Returns
-------
fig, axes:
The matplotlib figure and axes, only returned if save_fig = False.
"""
if "truths" in kwargs:
if not isinstance(kwargs["truths"], dict):
raise ValueError("'truths' must be a dictionary.")
missing_keys = set(self.theta_keys) - kwargs["truths"].keys()
if missing_keys:
logging.warning(
f"plot_corner(): Missing keys {missing_keys} in 'truths' dictionary,"
" argument will be ignored."
)
kwargs["truths"] = None
else:
kwargs["truths"] = [kwargs["truths"][key] for key in self.theta_keys]
kwargs["truths"] = self._scale_samples(
np.reshape(kwargs["truths"], (1, -1)), self.theta_keys
).ravel()
if "truth_color" not in kwargs:
kwargs["truth_color"] = "black"
if self.ndim < 2:
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig, ax = fig_and_axes
ax.hist(self.samples, bins=50, histtype="stepfilled")
ax.set_xlabel(self.theta_symbols[0])
fig.savefig(os.path.join(self.outdir, self.label + "_corner.png"), dpi=dpi)
plt.close(fig)
return
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, axes = plt.subplots(self.ndim, self.ndim, figsize=figsize)
else:
fig, axes = fig_and_axes
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=False)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if tglitch_ratio:
for j, k in enumerate(self.theta_keys):
if k == "tglitch":
s = samples_plt[:, j]
samples_plt[:, j] = (s - self.minStartTime) / (
self.maxStartTime - self.minStartTime
)
labels[j] = r"$R_{\mathrm{glitch}}$"
if type(nstds) is int and "range" not in kwargs:
_range = []
for j, s in enumerate(samples_plt.T):
median = np.median(s)
std = np.std(s)
_range.append((median - nstds * std, median + nstds * std))
elif "range" in kwargs:
_range = kwargs.pop("range")
else:
_range = None
hist_kwargs = kwargs.pop("hist_kwargs", dict())
if "density" not in hist_kwargs:
hist_kwargs["density"] = True
fig_triangle = corner.corner(
samples_plt,
labels=labels,
fig=fig,
bins=50,
max_n_ticks=4,
plot_contours=True,
plot_datapoints=True,
label_kwargs={"fontsize": 12},
data_kwargs={"alpha": 0.1, "ms": 0.5},
range=_range,
hist_kwargs=hist_kwargs,
show_titles=True,
fill_contours=True,
quantiles=[0.05, 0.95]
if "quantiles" not in kwargs
else kwargs.pop("quantiles"),
verbose=True if "verbose" not in kwargs else kwargs.pop("verbose"),
**kwargs,
)
axes_list = fig_triangle.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
for tick in ax.xaxis.get_major_ticks():
# tick.label1.set_fontsize(8)
tick.label1.set_rotation(30)
for tick in ax.yaxis.get_major_ticks():
# tick.label1.set_fontsize(8)
tick.label1.set_rotation(30)
plt.tight_layout()
fig.subplots_adjust(hspace=0.1, wspace=0.1)
if add_prior:
self._add_prior_to_corner(axes, self.samples, add_prior)
if save_fig:
fig_triangle.savefig(
os.path.join(self.outdir, self.label + "_corner.png"), dpi=dpi
)
plt.close(fig_triangle)
else:
return fig, axes
def plot_chainconsumer(self, save_fig=True, label_offset=0.25, dpi=300, **kwargs):
"""Generate a corner plot of the posterior using the `chaniconsumer` package.
`chainconsumer` is an optional dependency of PyFstat. See https://samreay.github.io/ChainConsumer/.
Parameters are akin to the ones described in MCMCSearch.plot_corner.
Only the differing parameters are explicitly described.
Parameters
----------
**kwargs:
Passed to chainconsumer.plotter.plot. Use "truths" to plot the true parameters of a signal.
"""
try:
import chainconsumer
except ImportError:
logging.warning(
"Could not import 'chainconsumer' package, please install it to use this method."
)
return
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if "truth" in kwargs:
if not isinstance(kwargs["truth"], dict):
raise ValueError("'truth' must be a dictionary.")
missing_keys = np.setdiff1d(self.theta_keys, list(kwargs["truth"].keys()))
if len(missing_keys) > 0:
logging.warning(
"plot_chainconsumer(): Missing keys {} in 'truth' dictionary,"
" argument will be ignored.".format(missing_keys)
)
kwargs["truth"] = None
else:
parameters_in_order = np.array(
[kwargs["truth"][key] for key in self.theta_keys]
).reshape((1, -1))
kwargs["truth"] = self._scale_samples(
parameters_in_order, self.theta_keys
).ravel()
c = chainconsumer.ChainConsumer()
c.add_chain(samples_plt, parameters=labels)
# We set usetex=False to avoid dependency on 'kpsewhich' TeX tool
c.configure(smooth=0, summary=False, sigma2d=True, usetex=False)
fig = c.plotter.plot(**kwargs)
axes_list = fig.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if save_fig:
fig.savefig(
os.path.join(self.outdir, self.label + "_chainconsumer_corner.png"),
dpi=dpi,
)
plt.close(fig)
else:
return fig, axes
def _add_prior_to_corner(self, axes, samples, add_prior):
for i, key in enumerate(self.theta_keys):
ax = axes[i][i]
s = samples[:, i]
lnprior = self._generic_lnprior(**self.theta_prior[key])
if add_prior == "full" and self.theta_prior[key]["type"] == "unif":
lower = self.theta_prior[key]["lower"]
upper = self.theta_prior[key]["upper"]
r = upper - lower
xlim = [lower - 0.05 * r, upper + 0.05 * r]
x = np.linspace(xlim[0], xlim[1], 1000)
else:
xlim = ax.get_xlim()
x = np.linspace(s.min(), s.max(), 1000)
multiplier = self._get_rescale_multiplier_for_key(key)
subtractor = self._get_rescale_subtractor_for_key(key)
ax.plot(
(x - subtractor) * multiplier,
[np.exp(lnprior(xi)) for xi in x],
"-C3",
label="prior",
)
for j in range(i, self.ndim):
axes[j][i].set_xlim(xlim[0], xlim[1])
for k in range(0, i):
axes[i][k].set_ylim(xlim[0], xlim[1])
def _get_prior_bounds(self, normal_stds=2):
"""Get the lower/upper bounds of all priors
Parameters
----------
normal_stds: float
Number of standard deviations to cut normal (Gaussian) or half-norm
distributions at.
Returns
-------
prior_bounds: dict
Dictionary of ["lower","upper"] pairs for each parameter
norm_warning: bool
A flag that is true if any parameter has a norm or half-norm prior.
Caller functions may wish to warn the user that the prior has
been truncated at normal_stds.
"""
prior_bounds = {}
norm_trunc_warning = False
for key in self.theta_keys:
prior_bounds[key] = {}
prior_dict = self.theta_prior[key]
norm_trunc_warning = "norm" in prior_dict["type"] or norm_trunc_warning
if prior_dict["type"] == "unif":
prior_bounds[key]["lower"] = prior_dict["lower"]
prior_bounds[key]["upper"] = prior_dict["upper"]
elif prior_dict["type"] == "log10unif":
prior_bounds[key]["lower"] = 10 ** prior_dict["log10lower"]
prior_bounds[key]["upper"] = 10 ** prior_dict["log10upper"]
elif prior_dict["type"] == "norm":
prior_bounds[key]["lower"] = (
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
prior_bounds[key]["upper"] = (
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "halfnorm":
prior_bounds[key]["lower"] = prior_dict["loc"]
prior_bounds[key]["upper"] = (
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "neghalfnorm":
prior_bounds[key]["upper"] = prior_dict["loc"]
prior_bounds[key]["lower"] = (
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
elif prior_dict["type"] == "lognorm":
prior_bounds[key]["lower"] = np.exp(
prior_dict["loc"] - normal_stds * prior_dict["scale"]
)
prior_bounds[key]["upper"] = np.exp(
prior_dict["loc"] + normal_stds * prior_dict["scale"]
)
else:
raise ValueError(
"Not implemented for prior type {}".format(prior_dict["type"])
)
return prior_bounds, norm_trunc_warning
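# Worked example (numbers are illustrative): for a prior
# {"type": "norm", "loc": 30.0, "scale": 0.5} with normal_stds=2, the bounds
# come out as lower = 30.0 - 2 * 0.5 = 29.0 and upper = 30.0 + 2 * 0.5 = 31.0,
# and norm_trunc_warning is returned as True.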
def plot_prior_posterior(
self,
normal_stds=2,
injection_parameters=None,
fig_and_axes=None,
save_fig=True,
):
"""Plot the prior and posterior probability distributions in the same figure
Parameters
----------
normal_stds: int
Bounds of priors in terms of their standard deviation. Only used if
`norm`, `halfnorm`, `neghalfnorm` or `lognorm` priors are given, otherwise ignored.
injection_parameters: dict
Dictionary containing the parameters of a signal. All parameters being searched must be
present as dictionary keys, otherwise this option is ignored.
fig_and_axes: tuple
(fig, axes) tuple to plot on.
save_fig: bool
If true, save the figure, else return the fig, axes.
Returns
-------
(fig, ax): (matplotlib.pyplot.figure, matplotlib.pyplot.axes)
If `save_fig` evaluates to `False`, return figure and axes.
"""
# Check injection parameters first
injection_parameters = injection_parameters or {}
missing_keys = set(self.theta_keys) - injection_parameters.keys()
if missing_keys:
logging.warning(
f"plot_prior_posterior(): Missing keys {missing_keys} in 'injection_parameters',"
" no injection parameters will be highlighted."
)
injection_parameters = None
if fig_and_axes is None:
fig, axes = plt.subplots(nrows=self.ndim, figsize=(8, 4 * self.ndim))
else:
fig, axes = fig_and_axes
if self.ndim == 1:
axes = [axes]
N = 1000
from scipy.stats import gaussian_kde
prior_bounds, _ = self._get_prior_bounds(normal_stds)
for i, (ax, key) in enumerate(zip(axes, self.theta_keys)):
prior_dict = self.theta_prior[key]
ln_prior_func = self._generic_lnprior(**prior_dict)
x = np.linspace(prior_bounds[key]["lower"], prior_bounds[key]["upper"], N)
prior = np.exp([ln_prior_func(xi) for xi in x]) # may not be vectorized
priorln = ax.plot(x, prior, "C3", label="prior")
ax.set(xlabel=self.theta_symbols[i], yticks=[])
s = self.samples[:, i]
while len(s) > 10 ** 4:
# random downsample to avoid slow calculation of kde
s = np.random.choice(s, size=int(len(s) / 2.0))
kde = gaussian_kde(s)
ax2 = ax.twinx()
postln = ax2.plot(x, kde.pdf(x), "k", label="posterior")
ax2.set(yticks=[], yticklabels=[])
if injection_parameters is not None:
injection = ax.axvline(
injection_parameters[key],
label="Injection",
color="purple",
ls="--",
)
plotlines = priorln + postln
labs = [plotline.get_label() for plotline in plotlines]
if injection_parameters is not None:
plotlines.append(injection)
labs.append(injection.get_label())
axes[0].legend(plotlines, labs, loc=1, framealpha=0.8)
if save_fig:
fig.savefig(os.path.join(self.outdir, self.label + "_prior_posterior.png"))
plt.close(fig)
else:
return fig, axes
def plot_cumulative_max(self, **kwargs):
"""Plot the cumulative twoF for the maximum posterior estimate.
This method accepts the same arguments as `pyfstat.core.ComputeFstat.plot_twoF_cumulative`,
except for `CFS_input`, which is taken from the loudest candidate; and `label` and `outdir`,
which are taken from the instance of this class.
For example, one can pass signal arguments to predict_twoF_cumulative through `PFS_kwargs`, or
set the number of segments using `num_segments_(CFS|PFS)`. The same applies for other options
such as `tstart`, `tend` or `savefig`. Each of these arguments will be passed through unchanged
to `pyfstat.core.ComputeFstat.plot_twoF_cumulative`, using its default value
otherwise.
See `pyfstat.core.ComputeFstat.plot_twoF_cumulative` for a comprehensive list of accepted
arguments and their default values.
Unlike the core function, here savefig=True is the default,
for consistency with other MCMC plotting functions.
"""
logging.info("Getting cumulative 2F")
d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.items():
if key not in d:
d[key] = val
if kwargs.get("savefig") is None:
kwargs["savefig"] = True
self.search.plot_twoF_cumulative(
CFS_input=d, label=self.label, outdir=self.outdir, **kwargs
)
def _generic_lnprior(self, **kwargs):
"""Return a lambda function of the pdf
Parameters
----------
**kwargs:
A dictionary containing 'type' of pdf and shape parameters
"""
def log_of_unif(x, a, b):
above = x < b
below = x > a
if type(above) is not np.ndarray:
if above and below:
return -np.log(b - a)
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(b - a)
return p
def log_of_log10unif(x, log10lower, log10upper):
log10x = np.log10(x)
above = log10x < log10upper
below = log10x > log10lower
if type(above) is not np.ndarray:
if above and below:
return -np.log(x * np.log(10) * (log10upper - log10lower))
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(x[idxs] * np.log(10) * (log10upper - log10lower))
return p
def log_of_halfnorm(x, loc, scale):
if x < loc:
return -np.inf
else:
return -0.5 * (
(x - loc) ** 2 / scale ** 2 + np.log(0.5 * np.pi * scale ** 2)
)
def cauchy(x, x0, gamma):
return 1.0 / (np.pi * gamma * (1 + ((x - x0) / gamma) ** 2))
def exp(x, x0, gamma):
if x > x0:
return np.log(gamma) - gamma * (x - x0)
else:
return -np.inf
if kwargs["type"] == "unif":
return lambda x: log_of_unif(x, kwargs["lower"], kwargs["upper"])
if kwargs["type"] == "log10unif":
return lambda x: log_of_log10unif(
x, kwargs["log10lower"], kwargs["log10upper"]
)
elif kwargs["type"] == "halfnorm":
return lambda x: log_of_halfnorm(x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "neghalfnorm":
return lambda x: log_of_halfnorm(-x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "norm":
return lambda x: -0.5 * (
(x - kwargs["loc"]) ** 2 / kwargs["scale"] ** 2
+ np.log(2 * np.pi * kwargs["scale"] ** 2)
)
elif kwargs["type"] == "lognorm":
# as of scipy 1.4.1 and numpy 1.18.1 the following parametrisation
# should be consistent with np.random.lognormal in _generate_rv()
return lambda x: lognorm.pdf(
x, s=kwargs["scale"], scale=np.exp(kwargs["loc"])
)
else:
logging.info("kwargs:", kwargs)
raise ValueError("Prior pdf type {:s} unknown.".format(kwargs["type"]))
def _generate_rv(self, **kwargs):
dist_type = kwargs.pop("type")
if dist_type == "unif":
return np.random.uniform(low=kwargs["lower"], high=kwargs["upper"])
if dist_type == "log10unif":
return 10 ** (
np.random.uniform(low=kwargs["log10lower"], high=kwargs["log10upper"])
)
if dist_type == "norm":
return np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
if dist_type == "halfnorm":
return np.abs(np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"]))
if dist_type == "neghalfnorm":
return -1 * np.abs(
np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
)
if dist_type == "lognorm":
return np.random.lognormal(mean=kwargs["loc"], sigma=kwargs["scale"])
else:
raise ValueError("dist_type {} unknown".format(dist_type))
def _plot_walkers(
self,
symbols=None,
alpha=0.8,
color="k",
temp=0,
lw=0.1,
nprod=0,
add_det_stat_burnin=False,
fig=None,
axes=None,
xoffset=0,
injection_parameters=None,
plot_det_stat=False,
context="ggplot",
labelpad=5,
):
"""Plot all the chains from a sampler"""
if injection_parameters is not None:
if not isinstance(injection_parameters, dict):
raise ValueError("injection_parameters is not a dictionary")
missing_keys = set(self.theta_keys) - injection_parameters.keys()
if missing_keys:
logging.warning(
f"plot_walkers(): Missing keys {missing_keys} in 'injection_parameters',"
" argument will be ignored."
)
injection_parameters = None
else:
scaled_injection_parameters = {
key: (
injection_parameters[key]
- self._get_rescale_subtractor_for_key(key)
)
* self._get_rescale_multiplier_for_key(key)
for key in injection_parameters.keys()
}
if symbols is None:
symbols = self._get_labels()
if context not in plt.style.available:
raise ValueError(
(
"The requested context {} is not available; please select a"
" context from `plt.style.available`"
).format(context)
)
if np.ndim(axes) > 1:
axes = axes.flatten()
shape = self.sampler.chain.shape
if len(shape) == 3:
nwalkers, nsteps, ndim = shape
chain = self.sampler.chain[:, :, :].copy()
if len(shape) == 4:
ntemps, nwalkers, nsteps, ndim = shape
if temp < ntemps:
logging.info("Plotting temperature {} chains".format(temp))
else:
raise ValueError(
"Requested temperature {} outside of available range".format(temp)
)
chain = self.sampler.chain[temp, :, :, :].copy()
samples = chain.reshape((nwalkers * nsteps, ndim))
samples = self._scale_samples(samples, self.theta_keys)
chain = chain.reshape((nwalkers, nsteps, ndim))
if plot_det_stat:
extra_subplots = 1
else:
extra_subplots = 0
with plt.style.context((context)):
if fig is None and axes is None:
fig = plt.figure(figsize=(4, 3.0 * ndim))
ax = fig.add_subplot(ndim + extra_subplots, 1, 1)
axes = [ax] + [
fig.add_subplot(ndim + extra_subplots, 1, i)
for i in range(2, ndim + 1)
]
idxs = np.arange(chain.shape[1])
burnin_idx = chain.shape[1] - nprod
last_idx = burnin_idx
if ndim > 1:
for i in range(ndim):
axes[i].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, i].T
if burnin_idx > 0:
axes[i].plot(
xoffset + idxs[: last_idx + 1],
cs[: last_idx + 1],
color="C3",
alpha=alpha,
lw=lw,
)
axes[i].axvline(xoffset + last_idx, color="k", ls="--", lw=0.5)
axes[i].plot(
xoffset + idxs[burnin_idx:],
cs[burnin_idx:],
color="k",
alpha=alpha,
lw=lw,
)
if injection_parameters is not None:
axes[i].axhline(
scaled_injection_parameters[self.theta_keys[i]],
ls="--",
lw=2.0,
color="orange",
)
axes[i].set_xlim(0, xoffset + idxs[-1])
if symbols:
axes[i].set_ylabel(symbols[i], labelpad=labelpad)
else:
axes[0].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, 0].T  # single-parameter chain: the only parameter index is 0
if burnin_idx:
axes[0].plot(
idxs[:burnin_idx],
cs[:burnin_idx],
color="C3",
alpha=alpha,
lw=lw,
)
axes[0].plot(
idxs[burnin_idx:], cs[burnin_idx:], color="k", alpha=alpha, lw=lw
)
if injection_parameters is not None:
axes[0].axhline(
scaled_injection_parameters[self.theta_keys[0]],
ls="--",
lw=5.0,
color="orange",
)
if symbols:
axes[0].set_ylabel(symbols[0], labelpad=labelpad)
axes[-1].set_xlabel(r"Number of steps", labelpad=0.2)
if plot_det_stat:
if len(axes) == ndim:
axes.append(fig.add_subplot(ndim + 1, 1, ndim + 1))
lnl = self.sampler.loglikelihood[temp, :, :]
if burnin_idx and add_det_stat_burnin:
burn_in_vals = lnl[:, :burnin_idx].flatten()
try:
detstat_burnin = (
burn_in_vals[~np.isnan(burn_in_vals)] - self.likelihoodcoef
) / self.likelihooddetstatmultiplier
axes[-1].hist(
detstat_burnin, bins=50, histtype="step", color="C3"
)
except ValueError:
logging.info(
"Histogram of detection statistic failed, "
"most likely all values were the same."
)
pass
else:
detstat_burnin = []
prod_vals = lnl[:, burnin_idx:].flatten()
try:
detstat = (
prod_vals[~np.isnan(prod_vals)] - self.likelihoodcoef
) / self.likelihooddetstatmultiplier
axes[-1].hist(detstat, bins=50, histtype="step", color="k")
except ValueError:
logging.info(
"Histogram of detection statistic failed, "
"most likely all values were the same."
)
pass
if self.BSGL:
axes[-1].set_xlabel(r"$\log_{10}\mathcal{B}_\mathrm{S/GL}$")
else:
axes[-1].set_xlabel(r"$\widetilde{2\mathcal{F}}$")
axes[-1].set_ylabel(r"$\mathrm{Counts}$")
combined_vals = np.append(detstat_burnin, detstat)
if len(combined_vals) > 0:
minv = np.min(combined_vals)
maxv = np.max(combined_vals)
Range = abs(maxv - minv)
axes[-1].set_xlim(minv - 0.1 * Range, maxv + 0.1 * Range)
xfmt = matplotlib.ticker.ScalarFormatter()
xfmt.set_powerlimits((-4, 4))
axes[-1].xaxis.set_major_formatter(xfmt)
return fig, axes
def _apply_corrections_to_p0(self, p0):
"""Apply any correction to the initial p0 values"""
return p0
def _generate_scattered_p0(self, p):
"""Generate a set of p0s scattered about p"""
p0 = [
[
p + self.scatter_val * p * np.random.randn(self.ndim)
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
return p0
def _generate_initial_p0(self):
"""Generate a set of init vals for the walkers"""
if type(self.theta_initial) == dict:
logging.info("Generate initial values from initial dictionary")
if hasattr(self, "nglitch") and self.nglitch > 1:
raise ValueError("Initial dict not implemented for nglitch>1")
p0 = [
[
[
self._generate_rv(**self.theta_initial[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
elif self.theta_initial is None:
logging.info("Generate initial values from prior dictionary")
p0 = [
[
[
self._generate_rv(**self.theta_prior[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
else:
raise ValueError("theta_initial not understood")
return p0
def _get_new_p0(self):
"""Returns new initial positions for walkers are burn0 stage
This returns new positions for all walkers by scattering points about
the maximum posterior with scale `scatter_val`.
"""
temp_idx = 0
pF = self.sampler.chain[temp_idx, :, :, :]
lnl = self.sampler.loglikelihood[temp_idx, :, :]
lnp = self.sampler.logprobability[temp_idx, :, :]
# General warnings about the state of lnp
if np.any(np.isnan(lnp)):
logging.warning(
"Of {} lnprobs {} are nan".format(np.shape(lnp), np.sum(np.isnan(lnp)))
)
if np.any(np.isposinf(lnp)):
logging.warning(
"Of {} lnprobs {} are +np.inf".format(
np.shape(lnp), np.sum(np.isposinf(lnp))
)
)
if np.any(np.isneginf(lnp)):
logging.warning(
"Of {} lnprobs {} are -np.inf".format(
np.shape(lnp), np.sum(np.isneginf(lnp))
)
)
lnp_finite = copy.copy(lnp)
lnp_finite[np.isinf(lnp)] = np.nan
idx = np.unravel_index(np.nanargmax(lnp_finite), lnp_finite.shape)
p = pF[idx]
p0 = self._generate_scattered_p0(p)
logging.info(
(
"Gen. new p0 from pos {} which had det. stat.={:2.1f}"
" and lnp={:2.1f}"
).format(idx[1], lnl[idx], lnp_finite[idx])
)
return p0
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _pickle_data(self, samples, lnprobs, lnlikes, all_lnlikelihood):
d = self._get_data_dictionary_to_save()
d["samples"] = samples
d["lnprobs"] = lnprobs
d["lnlikes"] = lnlikes
d["chain"] = self.sampler.chain
d["all_lnlikelihood"] = all_lnlikelihood
if os.path.isfile(self.pickle_path):
logging.info(
"Saving backup of {} as {}.old".format(
self.pickle_path, self.pickle_path
)
)
os.rename(self.pickle_path, self.pickle_path + ".old")
with open(self.pickle_path, "wb") as File:
pickle.dump(d, File)
def get_saved_data_dictionary(self):
"""Read the data saved in `self.pickel_path` and return it as a dictionary.
Returns
--------
d: dict
Dictionary containing the data saved in the pickle `self.pickle_path`.
"""
with open(self.pickle_path, "rb") as File:
d = pickle.load(File)
return d
def _check_old_data_is_okay_to_use(self):
if os.path.isfile(self.pickle_path) is False:
logging.info("No pickled data found")
return False
if self.sftfilepattern is not None:
oldest_sft = min(
[os.path.getmtime(f) for f in self._get_list_of_matching_sfts()]
)
if os.path.getmtime(self.pickle_path) < oldest_sft:
logging.info("Pickled data outdates sft files")
return False
old_d = self.get_saved_data_dictionary().copy()
new_d = self._get_data_dictionary_to_save().copy()
old_d.pop("samples")
old_d.pop("lnprobs")
old_d.pop("lnlikes")
old_d.pop("all_lnlikelihood")
old_d.pop("chain")
for key in "minStartTime", "maxStartTime":
if new_d[key] is None:
new_d[key] = old_d[key]
setattr(self, key, new_d[key])
mod_keys = []
for key in list(new_d.keys()):
if key in old_d:
if new_d[key] != old_d[key]:
mod_keys.append((key, old_d[key], new_d[key]))
else:
raise ValueError("Keys {} not in old dictionary".format(key))
if len(mod_keys) == 0:
return True
else:
logging.warning("Saved data differs from requested")
logging.info("Differences found in following keys:")
for key in mod_keys:
if len(key) == 3:
if np.isscalar(key[1]) or key[0] == "nsteps":
logging.info(" {} : {} -> {}".format(*key))
else:
logging.info(" " + key[0])
else:
logging.info(key)
return False
def _get_savetxt_fmt_dict(self):
fmt_dict = helper_functions.get_doppler_params_output_format(self.theta_keys)
fmt_dict["twoF"] = "%.9g"
if self.BSGL:
fmt_dict["log10BSGL"] = "%.9g"
return fmt_dict
def _get_savetxt_fmt_list(self):
"""Returns a list of output format specifiers, ordered like the samples
This is required because the output of _get_savetxt_fmt_dict()
will depend on the order in which those entries have been coded up.
"""
fmt_dict = self._get_savetxt_fmt_dict()
fmt_list = [fmt_dict[key] for key in self.output_keys]
return fmt_list
def export_samples_to_disk(self):
"""
Export MCMC samples into a text file using `numpy.savetxt`.
"""
self.samples_file = os.path.join(self.outdir, self.label + "_samples.dat")
logging.info("Exporting samples to {}".format(self.samples_file))
header = "\n".join(self.output_file_header)
header += "\n" + " ".join(self.output_keys)
outfmt = self._get_savetxt_fmt_list()
samples_out = copy.copy(self.samples)
# For convenience, we always save a twoF column,
# even if log10BSGL was used for the likelihood.
detstat = np.atleast_2d(self._get_detstat_from_loglikelihood()).T
if self.BSGL:
twoF = np.zeros_like(detstat)
self.search.BSGL = False
for idx, samp in enumerate(self.samples):
p = self._set_point_for_evaluation(samp)
if isinstance(p, dict):
twoF[idx] = self.search.get_det_stat(**p)
else:
twoF[idx] = self.search.get_det_stat(*p)
self.search.BSGL = self.BSGL
samples_out = np.concatenate((samples_out, twoF), axis=1)
# TODO: add single-IFO F-stats?
samples_out = np.concatenate((samples_out, detstat), axis=1)
Ncols = np.shape(samples_out)[1]
if len(outfmt) != Ncols:
raise RuntimeError(
"Lengths of data rows ({:d})"
" and output format ({:d})"
" do not match."
" If your search class uses different"
" keys than the base MCMCSearch class,"
" override the _get_savetxt_fmt_dict"
" method.".format(Ncols, len(outfmt))
)
np.savetxt(
self.samples_file,
samples_out,
delimiter=" ",
header=header,
fmt=outfmt,
)
def _get_detstat_from_loglikelihood(self, idx=None):
"""Inverts the extra terms applied in logl()."""
return (
self.lnlikes[idx if idx is not None else ...] - self.likelihoodcoef
) / self.likelihooddetstatmultiplier
def get_max_twoF(self):
"""Get the max. likelihood (loudest) sample and the compute
its corresponding detection statistic.
The employed detection statistic depends on `self.BSGL`
(i.e. 2F if `self.BSGL` evaluates to `False`, log10BSGL otherwise).
Returns
-------
d: dict
Parameters of the loudest sample.
maxtwoF: float
Detection statistic (2F or log10BSGL) corresponding to the loudest sample.
"""
if not hasattr(self, "search"):
raise RuntimeError(
"Object has no self.lnlikes attribute, please execute .run() first."
)
if any(np.isposinf(self.lnlikes)):
logging.info("lnlike values contain positive infinite values")
if any(np.isneginf(self.lnlikes)):
logging.info("lnlike values contain negative infinite values")
if any(np.isnan(self.lnlikes)):
logging.info("lnlike values contain nan")
# Mask non-finite values without losing index alignment with self.samples
finite_lnlikes = np.where(np.isfinite(self.lnlikes), self.lnlikes, -np.inf)
jmax = np.argmax(finite_lnlikes)
d = OrderedDict()
if self.BSGL:
# need to recompute twoF at the max likelihood
if hasattr(self, "search") is False:
self._initiate_search_object()
p = self._set_point_for_evaluation(self.samples[jmax])
self.search.BSGL = False
if isinstance(p, dict):
maxtwoF = self.search.get_det_stat(**p)
else:
maxtwoF = self.search.get_det_stat(*p)
self.search.BSGL = self.BSGL
else:
# can just reuse the logl value
maxtwoF = self._get_detstat_from_loglikelihood(jmax)
repeats = []
for i, k in enumerate(self.theta_keys):
if k in d and k not in repeats:
d[k + "_0"] = d[k] # relabel the old key
d.pop(k)
repeats.append(k)
if k in repeats:
k = k + "_0"
count = 1
while k in d:
k = k.replace("_{}".format(count - 1), "_{}".format(count))
count += 1
d[k] = self.samples[jmax][i]
return d, maxtwoF
def get_summary_stats(self):
"""Returns a dict of point estimates for all production samples.
Point estimates are computed on the MCMC samples using `numpy.mean`,
`numpy.std` and `numpy.quantiles` with q=[0.005, 0.05, 0.25, 0.5, 0.75, 0.95, 0.995].
Returns
-------
d: dict
Dictionary containing point estimates corresponding to ["mean", "std", "lower99",
"lower90", "lower50", "median", "upper50", "upper90", "upper99"].
"""
d = OrderedDict()
repeats = [] # taken from old get_median_stds(), not sure why necessary
for s, k in zip(self.samples.T, self.theta_keys):
if k in d and k not in repeats:
d[k + "_0"] = d[k] # relabel the old key
d.pop(k)
repeats.append(k)
if k in repeats:
k = k + "_0"
count = 1
while k in d:
k = k.replace("_{}".format(count - 1), "_{}".format(count))
count += 1
d[k] = {}
d[k]["mean"] = np.mean(s)
d[k]["std"] = np.std(s)
(
d[k]["lower99"],
d[k]["lower90"],
d[k]["lower50"],
d[k]["median"],
d[k]["upper50"],
d[k]["upper90"],
d[k]["upper99"],
) = np.quantile(s, [0.005, 0.05, 0.25, 0.5, 0.75, 0.95, 0.995])
return d
def check_if_samples_are_railing(self, threshold=0.01):
"""Returns a boolean estimate of if the samples are railing
Parameters
----------
threshold: float [0, 1]
Fraction of the uniform prior to test (at upper and lower bound)
Returns
-------
return_flag: bool
If true, the samples are railing
"""
return_flag = False
for s, k in zip(self.samples.T, self.theta_keys):
prior = self.theta_prior[k]
if prior["type"] == "unif":
prior_range = prior["upper"] - prior["lower"]
edges = []
fracs = []
for bound in ["lower", "upper"]:
bools = np.abs(s - prior[bound]) / prior_range < threshold
if np.any(bools):
edges.append(bound)
fracs.append(str(100 * float(np.sum(bools)) / len(bools)))
if len(edges) > 0:
logging.warning(
"{}% of the {} posterior is railing on the {} edges".format(
"% & ".join(fracs), k, " & ".join(edges)
)
)
return_flag = True
return return_flag
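# Example of the railing criterion (numbers are illustrative): for a uniform
# prior {"type": "unif", "lower": 0.0, "upper": 1.0} and threshold=0.01, a
# sample s counts as railing on the lower edge if |s - 0.0| < 0.01 and on the
# upper edge if |s - 1.0| < 0.01, i.e. if it lies within 1% of the prior range
# of either boundary.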
def write_par(self, method="median"):
"""Writes a .par of the best-fit params with an estimated std
Parameters
----------
method: str
How to select the `best-fit` params. Available methods: "median", "mean", "twoFmax".
"""
if method == "med":
method = "median"
if method in ["median", "mean"]:
summary_stats = self.get_summary_stats()
filename = os.path.join(self.outdir, self.label + "_" + method + ".par")
logging.info("Writing {} using {} parameters.".format(filename, method))
elif method == "twoFmax":
max_twoF_d, max_twoF = self.get_max_twoF()
filename = os.path.join(self.outdir, self.label + "_max2F.par")
logging.info("Writing {} at max twoF = {}.".format(filename, max_twoF))
else:
raise ValueError("Method '{}' not supported.".format(method))
with open(filename, "w+") as f:
for hline in self.output_file_header:
f.write("# {:s}\n".format(hline))
if method == "twoFmax":
f.write("MaxtwoF = {}\n".format(max_twoF))
f.write("tref = {}\n".format(self.tref))
if hasattr(self, "theta0_index"):
f.write("theta0_index = {}\n".format(self.theta0_idx))
if method in ["median", "mean"]:
for key, stat_d in summary_stats.items():
f.write(
"{} = {:1.16e}\n".format(
key,
stat_d[method],
)
)
elif method == "twoFmax":
for key, val in max_twoF_d.items():
f.write("{} = {:1.16e}\n".format(key, val))
def generate_loudest(self):
"""Use lalapps_ComputeFstatistic_v2 to produce a .loudest file"""
max_params, max_twoF = self.get_max_twoF()
for key in self.theta_prior:
if key not in max_params:
max_params[key] = self.theta_prior[key]
max_params = self.translate_keys_to_lal(max_params)
for key in ["transient-t0Epoch", "transient-t0Offset", "transient-tau"]:
if key in max_params and not int(max_params[key]) == max_params[key]:
rounded = int(round(max_params[key]))
logging.warning(
"Rounding {:s}={:f} to {:d} for CFSv2 call.".format(
key, max_params[key], rounded
)
)
max_params[key] = rounded
signal_parameter_keys = list(
self.translate_keys_to_lal(self.theta_prior).keys()
)
par_keys = list(max_params.keys())
pardiff = np.setdiff1d(par_keys, signal_parameter_keys)
if len(pardiff) > 0:
raise RuntimeError(
f"Dictionary for parameters at max2F point {par_keys}"
" did include keys"
# " (other than refTime)"
" not expected from signal parameters being searched over:"
f" {pardiff} not in {signal_parameter_keys}."
)
self.loudest_file = helper_functions.generate_loudest_file(
max_params=max_params,
tref=self.tref,
outdir=self.outdir,
label=self.label,
sftfilepattern=self.sftfilepattern,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
transientWindowType=getattr(self, "transientWindowType", None),
earth_ephem=self.earth_ephem,
sun_ephem=self.sun_ephem,
)
def write_prior_table(self):
"""Generate a .tex file of the prior"""
with open(os.path.join(self.outdir, self.label + "_prior.tex"), "w") as f:
f.write(
r"\begin{tabular}{c l c} \hline" + "\n"
r"Parameter & & & \\ \hhline{====}"
)
for key, prior in self.theta_prior.items():
if type(prior) is dict:
Type = prior["type"]
if Type == "unif":
a = prior["lower"]
b = prior["upper"]
line = r"{} & $\mathrm{{Unif}}$({}, {}) & {}\\"
elif Type == "norm":
a = prior["loc"]
b = prior["scale"]
line = r"{} & $\mathcal{{N}}$({}, {}) & {}\\"
elif Type == "halfnorm":
a = prior["loc"]
b = prior["scale"]
line = r"{} & $|\mathcal{{N}}$({}, {})| & {}\\"
u = self.unit_dictionary[key]
s = self.symbol_dictionary[key]
f.write("\n")
a = helper_functions.texify_float(a)
b = helper_functions.texify_float(b)
f.write(" " + line.format(s, a, b, u) + r" \\")
f.write("\n\\end{tabular}\n")
def print_summary(self):
"""Prints a summary of the max twoF found to the terminal"""
max_twoFd, max_twoF = self.get_max_twoF()
summary_stats = self.get_summary_stats()
logging.info("Summary:")
if hasattr(self, "theta0_idx"):
logging.info("theta0 index: {}".format(self.theta0_idx))
logging.info("Max twoF: {} with parameters:".format(max_twoF))
for k in np.sort(list(max_twoFd.keys())):
logging.info(" {:10s} = {:1.9e}".format(k, max_twoFd[k]))
logging.info("Mean +- std for production values:")
for k in np.sort(list(summary_stats.keys())):
logging.info(
" {:10s} = {:1.9e} +/- {:1.9e}".format(
k, summary_stats[k]["mean"], summary_stats[k]["std"]
)
)
logging.info("Median and 90% quantiles for production values:")
for k in np.sort(list(summary_stats.keys())):
logging.info(
" {:10s} = {:1.9e} - {:1.9e} + {:1.9e}".format(
k,
summary_stats[k]["median"],
summary_stats[k]["median"] - summary_stats[k]["lower90"],
summary_stats[k]["upper90"] - summary_stats[k]["median"],
)
)
logging.info("\n")
def _CF_twoFmax(self, theta, twoFmax, ntrials):
Fmax = twoFmax / 2.0
return (
np.exp(1j * theta * twoFmax)
* ntrials
/ 2.0
* Fmax
* np.exp(-Fmax)
* (1 - (1 + Fmax) * np.exp(-Fmax)) ** (ntrials - 1)
)
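# Reference for the expression above (standard result, stated here as a
# comment): in Gaussian noise 2F follows a chi-squared distribution with 4
# degrees of freedom, so with F = twoFmax / 2 the pdf is (F/2) * exp(-F) and
# the CDF is 1 - (1 + F) * exp(-F). The product ntrials * pdf * CDF**(ntrials-1)
# is then the pdf of the maximum over ntrials independent trials, and the
# exp(1j * theta * twoFmax) factor turns it into a characteristic-function
# integrand.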
def _pdf_twoFhat(self, twoFhat, nglitch, ntrials, twoFmax=100, dtwoF=0.1):
if np.ndim(ntrials) == 0:
ntrials = np.zeros(nglitch + 1) + ntrials
twoFmax_int = np.arange(0, twoFmax, dtwoF)
theta_int = np.arange(-1 / dtwoF, 1.0 / dtwoF, 1.0 / twoFmax)
CF_twoFmax_theta = np.array(
[
[
np.trapz(self._CF_twoFmax(t, twoFmax_int, ntrial), twoFmax_int)
for t in theta_int
]
for ntrial in ntrials
]
)
CF_twoFhat_theta = np.prod(CF_twoFmax_theta, axis=0)
pdf = (1 / (2 * np.pi)) * np.array(
[
np.trapz(
np.exp(-1j * theta_int * twoFhat_val) * CF_twoFhat_theta, theta_int
)
for twoFhat_val in twoFhat
]
)
return pdf.real
def _p_val_twoFhat(self, twoFhat, ntrials, twoFhatmax=500, Npoints=1000):
"""Caluculate the p-value for the given twoFhat in Gaussian noise
Parameters
----------
twoFhat: float
The observed twoFhat value
ntrials: int or array of length Nglitch+1
The number of trials for each of the Nglitch+1 inter-glitch segments
"""
twoFhats = np.linspace(twoFhat, twoFhatmax, Npoints)
pdf = self._pdf_twoFhat(twoFhats, self.nglitch, ntrials)
return np.trapz(pdf, twoFhats)
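# In other words, the p-value is the survival probability
# p = integral from twoFhat to twoFhatmax of pdf(x) dx, approximated with
# np.trapz on Npoints grid points; twoFhatmax should be large enough that the
# pdf has effectively decayed to zero there.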
def get_p_value(self, delta_F0=0, time_trials=0):
"""Gets the p-value for the maximum twoFhat value assuming Gaussian noise
Parameters
----------
delta_F0: float
Frequency variation due to a glitch.
time_trials: int, optional
Number of trials in each glitch + 1.
"""
d, max_twoF = self.get_max_twoF()
if self.nglitch == 1:
tglitches = [d["tglitch"]]
else:
tglitches = [d["tglitch_{}".format(i)] for i in range(self.nglitch)]
tboundaries = [self.minStartTime] + tglitches + [self.maxStartTime]
deltaTs = np.diff(tboundaries)
ntrials = [time_trials + delta_F0 * dT for dT in deltaTs]
p_val = self._p_val_twoFhat(max_twoF, ntrials)
logging.info("p-value = {}".format(p_val))
return p_val
def compute_evidence(self, make_plots=False, write_to_file=None):
"""Computes the evidence/marginal likelihood for the model.
Parameters
----------
make_plots: bool
Plot the results and save them to os.path.join(self.outdir, self.label + "_beta_lnl.png")
write_to_file: str
If given, dump evidence and uncertainty estimation to the specified path.
Returns
-------
log10evidence: float
Estimation of the log10 evidence.
log10evidence_err: float
Log10 uncertainty of the evidence estimation.
"""
betas = self.betas
mean_lnlikes = np.mean(np.mean(self.all_lnlikelihood, axis=1), axis=1)
mean_lnlikes = mean_lnlikes[::-1]
betas = betas[::-1]
if any(np.isinf(mean_lnlikes)):
logging.warning(
"mean_lnlikes contains inf: recalculating without"
" the {} infs".format(len(betas[np.isinf(mean_lnlikes)]))
)
idxs = np.isinf(mean_lnlikes)
mean_lnlikes = mean_lnlikes[~idxs]
betas = betas[~idxs]
log10evidence = np.trapz(mean_lnlikes, betas) / np.log(10)
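# Note: the integral above implements thermodynamic integration,
# log(Z) = int_0^1 <ln L>_beta d(beta), evaluated with the trapezoid rule over
# the sampled inverse temperatures and converted to log10 by dividing by ln(10).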
import numpy as np
def majority_voting(masks, voting='hard', weights=None, threshold=0.5):
"""Soft Voting/Majority Rule mask merging; Signature based upon the Scikit-learn VotingClassifier (https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/ensemble/_voting.py#L141)
Parameters
----------
masks : segmentations masks to merge, ndarray
Expected shape is num_of_masks * 1 * h * w
Accepts masks in range 0-1 (i.e apply sigmoid before passing to this function)
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
threshold : float, default=0.5
Threshold for separating the positive and negative class.
Applied first in case of hard voting and applied last in case of soft voting.
"""
assert len(masks.shape) == 4
if voting not in ('soft', 'hard'):
raise ValueError(f"Voting must be 'soft' or 'hard'; got (voting= {voting})")
for m in masks:
assert (m >= 0.).all() and (m <= 1.).all()
if voting == 'hard':
masks = (masks >= threshold).astype(np.float32)
if weights is None:
weights = np.array([1] * masks.shape[0])
else:
weights = np.array(weights)
# Broadcasting starts with the trailing (i.e. rightmost) dimensions and works its way left, therefore we move the "mask" dimension to the right
masks = np.transpose(masks, (1, 2, 3, 0))
masks = masks * weights
masks = np.transpose(masks, (3, 0, 1, 2))
masks = masks.sum(axis=0)
if voting == 'soft':
masks = (masks >= (threshold * weights.sum())).astype(np.float32)
elif voting == 'hard': # Same as doing a majority vote
masks = (masks > (0.5 * weights.sum())).astype(np.float32)
assert len(masks.shape) == 3
return masks.astype(np.float32)
def test_majority_voting():
m1 = np.zeros((1,2,2))
m2 = np.ones((1,2,2))
m3 = np.array([[[0.4, 0.4],
[0.4, 0.4]]])
m4 = np.array([[[0.6, 0.6],
[0.6, 0.6]]])
m5 = np.array([[[0.7, 0.7],
[0.2, 0.1]]])
m6 = np.array([[[0.55, 0.1],
[0.2, 0.6]]])
masks = np.stack([m1, m2], axis=0)
assert (majority_voting(masks, voting='hard') == np.array([[[0., 0.], [0., 0.]]])).all() # since threshold is >
masks = np.stack([m1, m2], axis=0)
assert (majority_voting(masks, weights=[2,1]) == np.array([[[0., 0.], [0., 0.]]])).all()
masks = np.stack([m1, m2, m3], axis=0)
assert (majority_voting(masks) == np.array([[[0., 0.], [0., 0.]]])).all()
masks = np.stack([m3, m4], axis=0)
assert (majority_voting(masks, weights=[2,1]) == np.array([[[0., 0.], [0., 0.]]])).all()
assert (majority_voting(masks, weights=[1,2]) == np.array([[[1., 1.], [1., 1.]]])).all()
masks = np.stack([m1, m2, m3, m4, m5], axis=0)
assert (majority_voting(masks) == np.array([[[1., 1.], [0., 0.]]])).all()
masks = np.stack([m4, m5, m6], axis=0)
assert (majority_voting(masks) == np.array([[[1., 1.], [0., 1.]]])).all()
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
maskv2 = np.array([1, 1, 1], dtype=np.bool_)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.bool_)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where, test_trace=False)
def test_dropout():
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.ops import set_global_rng_seed
def test_dropout_with_shape(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out = F.nn.dropout(data, rate, training=True)
gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
assert not out.numpy().all()
np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_multiple_dropout(shape, rate):
data = tensor(np.ones(shape, dtype=np.float32))
gm = GradManager().attach([data])
with gm:
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(out1, rate, training=True)
out3 = F.nn.dropout(out2, rate, training=True)
gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
def test_dropout_seed(shape, rate):
data = tensor(np.random.randn(*shape), dtype="float32")
set_global_rng_seed(111)
out1 = F.nn.dropout(data, rate, training=True)
out2 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out2.numpy()).all()
set_global_rng_seed(111)
out3 = F.nn.dropout(data, rate, training=True)
assert (out1.numpy() == out3.numpy()).all()
set_global_rng_seed(222)
out4 = F.nn.dropout(data, rate, training=True)
assert not (out1.numpy() == out4.numpy()).all()
test_dropout_with_shape([13, 17, 63, 21], 0.4)
test_dropout_with_shape([16, 32, 64], 0.3)
test_multiple_dropout([1024], 0.2)
test_dropout_seed([16, 32], 0.2)
def test_matinv():
shape1 = (5, 5)
shape2 = (3, 9, 9)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
# make matrix diagonally dominant for numerical stability
data1 += (np.eye(shape1[0]) * shape1[0]).astype("float32")
data2 += np.broadcast_to((np.eye(shape2[1]) * shape2[1]).astype("float32"), shape2)
cases = [
{"input": data1},
{"input": data2},
]
opr_test(
cases,
F.matinv,
compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
ref_fn=np.linalg.inv,
)
def test_matmul():
shape1 = 3
shape2 = 3
shape3 = (3, 5)
shape4 = (5, 6)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
batch_size = 10
shape1 = (2,)
shape2 = (batch_size, 2, 3)
shape3 = (batch_size, 3, 4)
shape4 = (batch_size, 10, 4, 2)
shape5 = (batch_size, 10, 2, 4)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
data4 = np.random.random(shape4).astype("float32")
data5 = np.random.random(shape5).astype("float32")
cases = [
{"input": [data1, data2]},
{"input": [data2, data3]},
{"input": [data3, data4]},
{"input": [data4, data5]},
]
opr_test(cases, F.matmul, ref_fn=np.matmul)
opr_test(
[{"input": [data1, data4]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
transpose_b=True,
)
opr_test(
[{"input": [data3, data2]}],
F.matmul,
ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
transpose_a=True,
transpose_b=True,
)
@pytest.mark.parametrize(
"shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
def func(a, b):
return F.matmul(a, b)
if is_symbolic is not None:
func = jit.trace(symbolic=is_symbolic)(func)
a = tensor(np.random.randn(*shape_a))
b = tensor(np.random.randn(*shape_b))
for _ in range(3):
out = func(a, b)
assert np.all(out.numpy() == 0)
if is_symbolic is None:
break
def test_interpolate():
def linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
out2 = F.vision.interpolate(inp, 4, mode="linear")
np.testing.assert_allclose(
out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
np.testing.assert_allclose(
out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
)
def many_batch_interpolate():
inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4])
out2 = F.vision.interpolate(inp, scale_factor=2.0)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def assign_corner_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
out = F.vision.interpolate(inp, [4, 4], align_corners=True)
out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
np.testing.assert_allclose(out.numpy(), out2.numpy())
def error_shape_linear_interpolate():
inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
def inappropriate_scale_linear_interpolate():
inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
with pytest.raises(ValueError):
F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
linear_interpolate()
many_batch_interpolate()
assign_corner_interpolate()
error_shape_linear_interpolate()
inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
inp_feat = np.random.randn(2, 32, 256, 256)
rois = np.zeros((4, 5))
rois[:, 0] = [0, 0, 1, 1]
rois[:, 1:3] = np.random.rand(4, 2) * 100
rois[:, 3:] = np.random.rand(4, 2) * 100 + 150
inp_feat = tensor(inp_feat)
rois = tensor(rois)
return inp_feat, rois
def test_roi_align():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_align(
inp_feat,
rois,
output_shape=output_shape,
mode="average",
spatial_scale=1.0 / 4,
sample_points=2,
aligned=True,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
if random:
inp_feat1 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
inp_feat2 = np.random.randn(
image_shape[0], image_shape[1], image_shape[2], image_shape[3]
)
else:
inp_feat1 = np.ones(image_shape) * constant
inp_feat2 = np.ones(image_shape) * constant
return tensor(inp_feat1), tensor(inp_feat2)
def test_correlation():
##test case 0 check the grad shape
data1, data2 = _gen_correlation()
grad = Grad().wrt(data1, callback=_save_to(data1))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=5,
max_displacement=4,
stride1=2,
stride2=2,
pad_size=2,
is_multiply=True,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
##test case 1 from https://github.com/NVIDIA/flownet2-pytorch/issues/194
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=True,
)
assert abs(out_feat.sum() - 1) < 1e-9
##test case 2 check same image subtraction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
##test case 3 check same image subtraction
data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=0,
stride1=1,
stride2=1,
pad_size=0,
is_multiply=False,
)
assert out_feat.sum() < 1e-9
##test case 4 check correlation
data1, _ = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=2.0
)
_, data2 = _gen_correlation(
random=False, image_shape=(1, 1, 220, 220), constant=1.0
)
out_feat = F.vision.correlation(
data1,
data2,
kernel_size=3,
max_displacement=2,
stride1=1,
stride2=2,
pad_size=0,
is_multiply=False,
)
assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
inp_feat, rois = _gen_roi_inp()
grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
output_shape = (7, 7)
out_feat = F.vision.roi_pooling(
inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
)
assert make_shape_tuple(out_feat.shape) == (
rois.shape[0],
inp_feat.shape[1],
*output_shape,
)
grad(out_feat, tensor(F.ones_like(out_feat)))
assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_avg_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
]
]
],
dtype=np.float32,
),
)
def test_adaptive_max_pool2d():
inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
oshp = (2, 2)
grad = Grad().wrt(inp, callback=_save_to(inp))
outp = F.adaptive_max_pool2d(inp, oshp,)
assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *oshp,)
np.testing.assert_equal(
outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
)
grad(outp, tensor(F.ones_like(outp)))
assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
np.testing.assert_equal(
inp.grad.numpy(),
np.array(
[
[
[
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0],
]
]
],
dtype=np.float32,
),
)
def test_one_hot():
def onehot_low_dimension():
inp = tensor(np.arange(1, 4, dtype=np.int32))
out = F.one_hot(inp, num_classes=4)
np.testing.assert_allclose(
out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
)
def onehot_high_dimension():
arr = np.array(
[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
dtype=np.int32,
)
inp = tensor(arr)
out = F.one_hot(inp, 10)
np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
onehot_low_dimension()
onehot_high_dimension()
def test_interpolate_fastpath():
# check shape
test_cases = [
[(1, 1, 10, 10), (5, 5)],
[(1, 3, 10, 10), (20, 20)],
[(10, 1, 10, 10), (1, 1)],
# [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
]
for inp_shape, target_shape in test_cases:
x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
out = F.vision.interpolate(x, target_shape, mode="bilinear")
assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
# check value
x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
out = F.vision.interpolate(x, (15, 5), mode="bilinear")
np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
np_x = np.arange(32)
x = tensor(np_x).astype(np.float32).reshape(1, 1, 32, 1)
out = F.vision.interpolate(x, (1, 1), mode="bilinear")
np.testing.assert_equal(out.item(), np_x.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
# M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
outp = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
inp_shape = (2, 1, 4, 4)
x = tensor(np.arange(32, dtype=dt).reshape(inp_shape))
M_shape = (1, 3, 3)
# M defines a translation: dst(1, 1, h, w) = src(1, 1, h+1, w+1)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
M = F.concat([M,] * 4, 0)
outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
np.testing.assert_equal(
outp.numpy(),
np.array(
[
[[[5, 6], [9, 10]]],
[[[21, 22], [25, 26]]],
[[[21, 22], [25, 26]]],
[[[5, 6], [9, 10]]],
],
dtype=dt,
),
)
def test_warp_affine():
inp_shape = (1, 3, 3, 3)
x = tensor(np.arange(27, dtype=np.float32).reshape(inp_shape))
weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
res = np.array(
[
[
[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
[[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
]
],
dtype=np.float32,
)
if not is_cuda_available():
np.testing.assert_almost_equal(outp.numpy(), res, 5)
def test_remap():
inp_shape = (1, 1, 4, 4)
inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
map_xy_shape = (1, 2, 2, 2)
map_xy = tensor(
np.array(
[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
).reshape(map_xy_shape)
)
outp = F.vision.remap(inp, map_xy)
np.testing.assert_equal(
outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
)
import numpy as np
import cv2
import os
from os import listdir
from os.path import isfile, join
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping, Callback as TfCallback
marked_path = r'C:\Users\ASUS\PycharmProjects\AI_Image\sso_image\scnu-sso-captcha-master\src\dataset\codes\small_mark/'
files = [f for f in listdir(marked_path)]
np.random.shuffle(files)
def cv_im_process(img, flatten=False, normalize=False):
img = cv2.resize(img, (100, 75), interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
# binarize (inverted adaptive Gaussian threshold)
im2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 15)
# remove noise, especially salt-and-pepper noise
im3 = cv2.medianBlur(im2, 3)
# line denoising: blank out pixels whose neighbours are mostly white
h, w = im3.shape[:2]
for y in range(1, w - 1):
for x in range(1, h - 1):
count = 0
if np.all(im3[x, y - 1] > 245):
count = count + 1
if np.all(im3[x, y + 1] > 245):
count = count + 1
if np.all(im3[x - 1, y] > 245):
count = count + 1
if np.all(im3[x + 1, y] > 245):
count = count + 1
if count > 2:
im3[x, y] = 255
if flatten:
im3 = im3.flatten()
if normalize:
im3 = im3 / 255
return im3
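# Minimal usage sketch (hypothetical; assumes at least one readable image under
# marked_path): preprocess one captcha into a flat, normalized feature vector.
def _demo_preprocess(path):
    img = cv2.imread(path)
    vec = cv_im_process(img, flatten=True, normalize=True)
    assert vec.shape == (75 * 100,)  # 75 rows x 100 cols after the resize
    return vec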
idict = '0123456789abcdefghijklmnopqrstuvwxyz'
def text2vec(code):  # one-hot encoding
code = code.lower()
print(code)
t = np.zeros((len(code), 36), dtype=float)
for i in range(t.shape[0]):
t[i][idict.index(code[i])] = 1
return t
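# Sanity sketch for the one-hot layout (illustration only): for code "a1",
# row 0 carries its 1 at idict.index('a') == 10 and row 1 at idict.index('1') == 1.
def _check_text2vec():
    t = text2vec("a1")
    assert t.shape == (2, 36)
    assert t[0, idict.index('a')] == 1.0 and t[1, idict.index('1')] == 1.0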
def vec2text(t):
idx = | np.argmax(t, axis=1) | numpy.argmax |
# Methods related to statistical analysis of simulation output files
import numpy as np
import pandas as pd
import numba as nb
import warnings
from . import datautil
from . import util
class Seismograph:
"""Finds the avalanches and corresponding sizes and durations in a signal.
Parameters
----------
t : np.ndarray
Time
v : np.ndarray
Signal to be watched for avalanches
vt : float
Threshold value; events are found when the signal crosses this threshold
s : np.ndarray
Signal to be integrated during an avalanche to get avalanche sizes; by default, this is v-vt.
But it can also be different, e.g., a domain wall avalanche can be thresholded on the velocity,
but can be integrated to get the avalanche sizes in terms of Axy and Az.
If this parameter is supplied, no threshold is subtracted from s before it is integrated.
"""
def __init__(self, t, v, vt, s=None):
self.t = t
self.v = v
self.vt = vt
self.s = s if s is not None else v - vt
if not (t.shape == v.shape == s.shape):
warnings.warn(f't, v, and s must have the same shape. (t, v, s): ({len(t)}, {len(v)}, {len(s)})')
self.sizes = np.zeros(0)
self.durations = np.zeros(0)
return
self.istart, self.istop = _events(v, vt)
# self.istart, self.istop = _remove_length1_events(*_events(v, vt))
self.tstart, self.tstop = self.t[self.istart], self.t[self.istop]
self.durations = self.tstop - self.tstart
self.sizes = _event_sizes(self.t, self.s, self.istart, self.istop)
return
def _remove_length1_events(istart, istop):
longer_than_1 = istop - istart > 1
return istart[longer_than_1], istop[longer_than_1]
def _start_indices(v, vt):
"""Find the starting indices of each event in the signal.
Parameters
----------
v : np.ndarray
Signal to be searched for avalanches
vt : float
Threshold value; events are found when the signal crosses this threshold
Returns
-------
np.ndarray
The starting indices of each event
"""
return np.nonzero(np.logical_and(v[1:] > vt, v[:-1] <= vt))[0]+1
def _end_indices(v, vt):
"""Find the stopping indices of each event in the signal.
Parameters
----------
v : np.ndarray
Signal to be searched for avalanches
vt : float
Threshold value; events are found when the signal crosses this threshold
Returns
-------
np.ndarray
The ending indices of each event
"""
return np.nonzero(np.logical_and(v[1:] <= vt, v[:-1] > vt))[0]+1
def _events(v, vt):
"""Return the starting and stopping indices of each event in the signal. The returned arrays have same size. The
first event is always ignored. If v[-1] > vt, the last event is considered to end at v[-1], unless v[-2] < vt,
in which case the last event is dropped to avoid issues with finding the event size.
Parameters
----------
v : np.ndarray
Signal to be searched for avalanches
vt : float
Threshold value; events are found when the signal crosses this threshold
Returns
-------
(np.ndarray, np.ndarray)
Pair of arrays: first array contains the starting indices of each event, the second array contains the ending
indices of each event
"""
i_start = _start_indices(v, vt)
i_stop = _end_indices(v, vt)
if i_start[0] > i_stop[0]:
i_stop = i_stop[1:]
if i_start[-1] > i_stop[-1]:
i_start = i_start[:-1]
if i_start.shape != i_stop.shape:
raise ValueError('Starting and stopping indices of avalanches do not have same number of elements.')
return i_start, i_stop
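# Toy sketch of the bookkeeping above (illustration, not part of the library):
# with vt = 0 the leading excursion at index 0 has no rising edge and is
# dropped, leaving two well-formed (start, stop) pairs.
def _events_demo():
    v = np.array([1.0, -1.0, 2.0, 2.0, -1.0, 3.0, -1.0])
    istart, istop = _events(v, vt=0.0)
    assert list(istart) == [2, 5] and list(istop) == [4, 6]
    return istart, istop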
def _event_sizes(t, s, i_start, i_stop):
"""Compute the size of each avalanche in the signal. The size of an avalanche is the integral of the signal over
the time the signal is above the threshold. This integration is done using rectangles, with
dt[i] = (t[i+1]-t[i-1])/2
except at the edges, where the forward (for t[0]) or backward (for t[-1]) differences are taken.
Parameters
----------
t : np.ndarray
Time
s : np.ndarray
Signal to be integrated
i_start : np.ndarray
Array of starting indices of the events
i_stop : np.ndarray
Array of ending indices of the events
Returns
-------
np.ndarray
Size of each event
"""
# Central difference for interior points; forward/backward differences at the edges preserve the array length
dt = np.hstack((np.array([t[1] - t[0]]), (t[2:]-t[:-2])*0.5, np.array([t[-1]-t[-2]])))
ret = np.empty(i_start.shape[0])
for i in range(len(i_start)):
ret[i] = np.sum(s[i_start[i]:i_stop[i]]*dt[i_start[i]:i_stop[i]], axis=0)
return ret
def bin_avg_event_shape(data, duration=None, tol=None, drange=None, nbins=None, norm=True, normy=False):
"""Get the binned-average event shapes from the data. Each event has a time array (t) and signal array (s).
If tol is specified and drange is None, events of duration d which fall within
duration - tol < d < duration + tol
are collected. If drange is specified, events of duration d which fall within
drange[0] < d < drange[1]
are collected. The time arrays are then normalized to the interval [0, 1]. The time-axis is then divided into nbins
number of time bins, and the value of s in each bin is averaged across all events.
Parameters
----------
data : datautil.SimRun or datautil.Simdata
Data to analyze
duration : float
Set the duration of the bins to average. If None, drange is used.
tol : float or None
Sets the tolerance determining which events to include in the average. If None, drange is used.
drange : (float, float) or None
Sets the range determining which events to include in the average. If None, tol is used.
nbins : int
Number of bins to divide the time axis into. If nbins==None, uses the smallest number of bins possible; see
docstring for bin_avg()
norm : bool
Set to True to normalize the time to the interval [0, 1]
normy : bool or str
Passed through to bin_avg(): 'max' rescales the binned signal to [0, 1], 'area' normalizes its integral to 1
Returns
-------
4-tuple of np.ndarray
(event times, event signals, binned-normalized time, binned-average signal)
"""
if drange is None and tol is not None and duration is not None:
t, s = data.events_by_duration(duration-tol, duration+tol)
elif tol is None and duration is None and drange is not None:
t, s = data.events_by_duration(drange[0], drange[1])
else:
raise ValueError('Must specify either a range or tolerance for bin_avg_event_shape()')
t = normalize_t(t)
tbin, sbin = bin_avg(t, s, nbins=nbins, norm=norm, normy=normy)
return t, s, tbin, sbin
def bin_avg(t, s, nbins=None, norm=True, normy=False):
"""Bin and average the input signals. The times of each event are normalized from 0 to 1 if norm=True.
Parameters
----------
t : list or np.ndarray
Can be
1. A list of np.ndarrays, each containing a set of times (usually corresponding to measurements during
an avalanche)
2. A 1D np.ndarray containing times
s : list or np.ndarray
Values of the signal measured at times t. Must be same shape as t.
nbins : int or None
Number of bins to use. If None, then the number of bins is set equal to the length of the smallest event array.
norm : bool
Scale the t-axis to [0, 1].
normy : bool or str
If 'max', rescale the binned signal to [0, 1]; if 'area', normalize so its integral is 1; otherwise return it unscaled.
Returns
-------
t_bin : np.ndarray
Binned timesteps
s_bin : np.ndarray
Average value of the signal at each timestep
"""
if norm:
t = normalize_t(t)
if isinstance(t, list):
f_t = np.hstack(t).flatten()
else:
f_t = t.flatten()
if isinstance(s, list):
f_s = np.hstack(s).flatten()
else:
f_s = s.flatten()
if nbins is None:
nbins = np.min([len(_t) for _t in t])
t_bin = np.linspace(np.min(f_t), np.max(f_t), nbins+1) # Array of bin edges
s_bin = np.zeros(t_bin.shape)
else:
t_bin = np.linspace(np.min(f_t), np.max(f_t), nbins+1)
s_bin = np.zeros(nbins+1)
for i in range(nbins):
in_bin_i = np.nonzero(np.logical_and(t_bin[i] <= f_t, f_t < t_bin[i+1]))
s_bin[i] = np.mean(f_s[in_bin_i])
# in_last_bin = np.nonzero(np.logical_and(t_bin[-2] <= f_t, f_t <= t_bin[-1]))
# s_bin[-1] = np.mean(f_s[in_last_bin])
if normy == 'max':
return t_bin, (s_bin - np.min(s_bin))/(np.max(s_bin)-np.min(s_bin))
elif normy == 'area':
return t_bin, (s_bin)/np.trapz(s_bin, t_bin)
else:
return t_bin, s_bin
def normalize_t(t):
"""For a list of arrays, normalize each array to fall between 0 and 1.
Parameters
----------
t : list of np.ndarray
Returns
-------
list of np.ndarray
"""
return [(_t - np.min(_t))/(np.max(_t)-np.min(_t)) for _t in t]
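# Throwaway sketch: each event's time axis is mapped onto [0, 1].
def _normalize_t_demo():
    out = normalize_t([np.array([2.0, 3.0, 4.0])])
    np.testing.assert_allclose(out[0], [0.0, 0.5, 1.0])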
def event_hists(data, bins, key='vdw'):
"""Get event histograms. The event sizes and durations are log-distributed; the absolute value of the abscissa
is taken before the log binning.
Parameters
----------
data : datautil.SimRun or datautil.SimData
Data from which to generate histograms
bins : int
Number of bins in which the data should be binned
Returns
-------
tuple of np.ndarray
size_bins, size_hist, time_bins, time_hist
"""
if isinstance(data, datautil.SimRun) or isinstance(data, datautil.SimData):
size_bins, size_hist = loghist(data.get_avalanche_sizes(key=key), bins)
time_bins, time_hist = loghist(data.get_avalanche_durations(), bins)
return size_bins, size_hist, time_bins, time_hist
else:
raise NotImplementedError
def loghist(_data, bins):
"""Generate bins and a histogram, properly normalized by bin size and number of samples, using log spaced bins.
The absolute value of the data is taken before being binned.
Parameters
----------
data : np.ndarray
Data from which to generate histograms
bins : int
Number of bins in which the data should be binned
Returns
-------
tuple of np.ndarray
bins, hist
"""
data = np.abs(_data)
logbins = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins)
# density=True apparently makes this a PDF by dividing by the bin width and sample size
hist, _ = np.histogram(data, bins=logbins, density=True)
# Normalize the distributions; the number of occurences in each bin is divided by the bin width and the sample size
# hist = hist/(np.diff(logbins)*len(data))
# Validate the histograms
util.validate_pdf(logbins, hist)
return logbins, hist
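# Quick numerical sketch of the density property that util.validate_pdf checks:
# with density=True the histogram integrates to ~1 over the log-spaced bins.
# (Synthetic lognormal data; purely illustrative.)
def _loghist_demo():
    data = np.random.lognormal(mean=0.0, sigma=1.0, size=10_000)
    logbins, hist = loghist(data, 30)
    assert abs(np.sum(hist * np.diff(logbins)) - 1.0) < 1e-6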
def avg_event_size(data, bins=40, key='vdw'):
sizes = data.get_avalanche_sizes(key=key)
times = data.get_avalanche_durations()
log_time_bins = np.logspace(np.log10(np.min(times)), np.log10(np.max(times)), bins+1) # Bin edges
avg_size = np.ones(bins)
for i in range(bins):
avg_size[i] = np.mean(sizes[np.logical_and(times > log_time_bins[i], times < log_time_bins[i+1])])
return log_time_bins, avg_size
@nb.jit(nopython=True)
def loghist2d(datax, datay, nbinsx, nbinsy):
datax = np.abs(datax)
datay = np.abs(datay)
# These define the bin edges.
binsx = 10**np.linspace(np.log10(np.min(datax)), np.log10(np.max(datax)), nbinsx+1)
binsy = 10**np.linspace(np.log10(np.min(datay)), np.log10(np.max(datay)), nbinsy+1)
hist = np.zeros((nbinsx, nbinsy))
bin_areas = np.outer((binsx[1:] - binsx[:-1]), (binsy[1:] - binsy[:-1]))
for i in range(datax.size):
# Find the correct bin to increment
_iy = np.nonzero(datay[i] >= binsy[:-1])[0]
_ix = np.nonzero(datax[i] >= binsx[:-1])[0]
iy = -1
ix = -1
if _iy.size > 0:
iy = _iy[-1]
if _ix.size > 0:
ix = _ix[-1]
# Increment
hist[iy, ix] += 1
hist = hist/(bin_areas*datax.size)
return hist, binsx, binsy
def joint_pdf_bin_areas(binsx, binsy):
"""Given a 1D set of bin edges along x and y, compute the 2D set of bin areas formed by the grid.
Parameters
----------
binsx : np.ndarray
1D array of bin edges along x
binsy : np.ndarray
1D array of bin edges along y
Returns
-------
np.ndarray
2D array of bin areas for the given bin sizes
"""
xbsize = binsx[1:] - binsx[:-1]
ybsize = binsy[1:] - binsy[:-1]
return np.outer(xbsize, ybsize)
# def joint_pdf_bin_centers(binsx, binsy):
def bin_centers(*bins):
"""Compute the centers of bins given the bin edges.
Parameters
----------
bins : np.ndarray
bin edges
Returns
-------
np.ndarray
bin centers. This array is 1 element shorter than the inputs.
"""
# return (binsx[1:] + binsx[:-1])*0.5, (binsy[1:] + binsy[:-1])*0.5
return [(b[1:] + b[:-1])*0.5 for b in bins]
def joint_pdf_mean_y(pdf, binsx, binsy):
"""From a joint PDF, calculate the mean along the y-direction for each x-bin.
Parameters
----------
pdf : np.ndarray
Array of shape (binsy.size - 1, binsx.size - 1). This is the probability density function
binsx : np.ndarray
Array of bin edges along the x direction
binsy : np.ndarray
Array of bin edges along the y direction
Returns
-------
np.ndarray
The mean along the y-direction for each x-bin; should be of shape (binsx.size - 1,).
"""
# Get the bin centers
# bincx, bincy = joint_pdf_bin_centers(binsx, binsy)
bincx, bincy = bin_centers(binsx, binsy)
# Get the frequency distribution from the probability density by multiplying by bin areas
freq = pdf*joint_pdf_bin_areas(binsx, binsy)
# Find the frequency distribution along each column, effectively finding conditional probabilities P(X=x0, Y)
col_freq = freq/np.outer(np.ones(bincy.size), np.sum(freq, axis=0))
# Find the average y-value for each column
col_mean = np.sum(col_freq*np.outer(bincy, np.ones(bincx.size)), axis=0)
return col_mean
def extent(binsx, binsy):
"""Get the extent of the 2D histogram generated from binsx and binsy.
Parameters
----------
binsx : np.ndarray
Bin edges along x
binsy : np.ndarray
Bin edges along y
Returns
-------
np.ndarray
Array of [xmin, xmax, ymin, ymax]
"""
return np.array([binsx.min(), binsx.max(), binsy.min(), binsy.max()])
def lognan(pdf):
"""Compute the log10 of the input PDF. This function gets around errors associated with taking the log of a
histogram which has one or more bins with 0 events by masking those bins with np.nan values before taking the log.
Parameters
----------
pdf : np.ndarray
Input probability distribution function
Returns
-------
np.ndarray
Returns log10(pdf), except if there are any bins where the pdf == 0, those bins now have np.nan values.
"""
_pdf = pdf.copy()
_pdf[pdf <= 0] = np.nan
return | np.log10(_pdf) | numpy.log10 |
#!/usr/bin/env ipython
import gpxpy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from geopy.distance import vincenty
def gps_distance_elevation(fname):
segment = gpxpy.parse(open(fname + '.gpx', 'r')).tracks[0].segments[0]
elevation = []
loc = []
for p in segment.points:
elevation.append(p.elevation)
lat, lon = p.latitude, p.longitude
loc.append((lat, lon))
distance = np.array([0] + [vincenty(loc[i], loc[i-1]).meters for i in range(len(loc)-1)]).cumsum()
plt.plot(distance, elevation, label=fname)
plt.savefig(fname + '.png')
plt.clf()
return distance, elevation
def downsample_mountain(fname, length=30):
""" Downsample trace to specified length """
distance, elevation = gps_distance_elevation(fname)
d = np.linspace(distance[0], distance[-1], length)
e = np.interp(d, distance, elevation)
plt.plot(d, e, label=fname)
plt.savefig(fname + '_downsampled.png')
plt.clf()
assert len(d) == length
assert len(e) == length
return d, e
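# The resampling step above in isolation (a standalone numpy sketch, no GPX
# file required): an arbitrary-length trace is reduced to a fixed sample count.
def _resample_demo(length=30):
    distance = np.linspace(0.0, 5000.0, 977)  # awkward input length on purpose
    elevation = np.sin(distance / 500.0) * 100.0
    d = np.linspace(distance[0], distance[-1], length)
    e = np.interp(d, distance, elevation)
    assert len(d) == len(e) == length
    return d, e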
def get_mts():
d_m, e_m = downsample_mountain('tour-mont-blanc-anti-clockwise')
d_c, e_c = downsample_mountain('carrauntoohil')
# scale both
e_m -= np.mean(e_m)
e_m /= np.max(e_m)
e_c -= np.mean(e_c)
e_c /= np.max(e_c)
# combine
samples = | np.array([e_m, e_c]) | numpy.array |
#!/usr/bin/env python
""" Convert a svg file into 2D triangle mesh.
"""
import argparse
import logging
import pymesh
import numpy as np
from numpy.linalg import norm
import os.path
from subprocess import check_call
from time import time
def parse_args():
parser = argparse.ArgumentParser(description=__doc__);
parser.add_argument("--engine", help="Triangulation engine", choices=(
"triangle_conforming_delaunay",
"triangle_constrained_delaunay",
"cgal_constrained_delaunay",
"cgal_conforming_delaunay",
"geogram_delaunay",
"jigsaw_frontal_delaunay",
"mmg_delaunay", "triwild"),
default="triangle_conforming_delaunay");
parser.add_argument("--resolve-self-intersection", "-r", action="store_true");
parser.add_argument("--with-frame", '-f', action="store_true");
parser.add_argument("--with-cell-label", "-l", action="store_true");
parser.add_argument("--with-cleanup", "-c", action="store_true");
parser.add_argument("--with-triangulation", "-t", action="store_true");
parser.add_argument("--stage", type=int, default=1);
parser.add_argument("--epsilon", type=float, default=1e-3);
parser.add_argument("--log", type=str, help="Logging level",
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default="INFO");
parser.add_argument("--with-features", '-F', action="store_true",
help="TriWild specific option to preserve features");
parser.add_argument("input_svg");
parser.add_argument("output_mesh");
return parser.parse_args();
def get_logger(level):
numeric_level = getattr(logging, level, None);
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: {}'.format(level));
logging.basicConfig(level=numeric_level);
return logging.getLogger("PyMesh.Triangulation");
def drop_zero_dim(wires):
# Trim zero dimension.
if wires.dim == 3:
vertices = wires.vertices;
assert(np.all(vertices[:,2] == 0));
vertices = vertices[:, [0,1]];
wires.load(vertices, wires.edges);
return wires;
def cleanup(wires, logger):
if wires.num_vertices == 0:
return wires;
start_time = time();
tol = 1e-6;
vertices, edges, __ = pymesh.remove_duplicated_vertices_raw(
wires.vertices, wires.edges, tol);
# Remove duplicated edges.
ordered_edges = np.sort(edges, axis=1);
__, unique_edge_ids, __ = pymesh.unique_rows(ordered_edges);
edges = edges[unique_edge_ids, :];
wires.load(vertices, edges);
# Remove topologically degenerate edges.
is_not_topologically_degenerate = edges[:,0] != edges[:,1];
if not | np.all(is_not_topologically_degenerate) | numpy.all |
"""
Classes for generating pseudo-random surfaces based on description of FFT:
===========================================================================
===========================================================================
Each class inherits functionality from the Surface but changes the
__init__ and discretise functions
===========================================================================
===========================================================================
DiscFreqSurface:
Generate a surface containing only specific frequency components
ProbFreqSurface:
Generate a surface containing normally distributed amplitudes with a
specified function for the variance of the distribution based on the
frequency
HurstFractalSurface:
Generate a surface containing frequency components with amplitude
specified by a function of the frequency
===========================================================================
===========================================================================
"""
import typing
from numbers import Number
import numpy as np
from .Surface_class import _AnalyticalSurface, Surface, check_coords_are_simple  # check_coords_are_simple is used below but was never imported; assumed to live in Surface_class
__all__ = ["DiscFreqSurface", "ProbFreqSurface", "HurstFractalSurface"]
class DiscFreqSurface(_AnalyticalSurface):
r""" Surfaces with discrete frequency components
This surface produces a profile with frequency components which are constant in the y direction,
these can be rotated and combined by addition or subtraction to give any combination of frequencies.
Parameters
----------
frequencies: Sequence[float]
The frequencies present in the surface
amplitudes: Sequence[float], optional ((1, ))
The amplitude of each frequency, must be same length as frequencies
phases: typing.Sequence[float] = (0,)
The phases of each of the frequencies, must be the same length as frequencies
rotation: Number = 0
If set the surface is rotated by the set amount, in radians
shift: tuple, optional (None)
If set the origin of the surface is shifted by the set amount in the x and y directions, should be a two element
tuple of float. By default, the surface is shifted so that the origin becomes the centre of the surface. To stop
this behaviour specify a shift of (0, 0)
generate: bool, optional (False)
If True the surface profile is generated on instantiation, else it can be generated by the discretise method.
If True two of the: grid_spacing, extent or shape must also be set
grid_spacing: float, optional (None)
The grid spacing of the surface profile
extent: tuple, optional (None)
The overall dimensions of the surface in the same units as the grid spacing, should be a two element tuple of
float
shape: tuple = None
The number of points in each direction of the surface array, should be a two element tuple of integers
See Also
--------
ProbFreqSurface
HurstFractalSurface
Notes
-----
Roughness functions are aliased from the functions provided in the surface
module
Examples
--------
>>> mySurf=DiscFreqSurface([10], [0.1])
>>> mySurf.extent=[0.5,0.5]
>>> mySurf.discretise(0.001)
"""
is_discrete = False
surface_type = 'discreteFreq'
def __init__(self, frequencies: typing.Sequence[float], amplitudes: typing.Sequence[float] = (1,),
phases: typing.Sequence[float] = (0,), rotation: Number = 0,
shift: typing.Optional[tuple] = None,
generate: bool = False, grid_spacing: float = None,
extent: tuple = None, shape: tuple = None):
if type(frequencies) is list or type(frequencies) is np.ndarray:
self.frequencies = frequencies
else:
raise ValueError('Frequencies, amplitudes and phases must be equal '
'length lists or np.arrays')
is_complex = [type(amp) is complex for amp in amplitudes]
if any(is_complex):
if not len(frequencies) == len(amplitudes):
raise ValueError('Frequencies, amplitudes and phases must be'
' equal length lists or np.arrays')
else:
self.amplitudes = amplitudes
else:
if not len(frequencies) == len(amplitudes) == len(phases):
raise ValueError('Frequencies, amplitudes and phases must be'
' equal length lists or np.arrays')
else:
cplx_amps = []
for idx in range(len(amplitudes)):
cplx_amps.append(amplitudes[idx] *
np.exp(1j * phases[idx]))
self.amplitudes = cplx_amps
super().__init__(generate=generate, rotation=rotation, shift=shift,
grid_spacing=grid_spacing, extent=extent, shape=shape)
def _height(self, x_mesh, y_mesh):
profile = np.zeros_like(x_mesh)
for idx in range(len(self.frequencies)):
profile += np.real(self.amplitudes[idx] *
np.exp(-1j * self.frequencies[idx] * x_mesh * 2 * np.pi))
return profile
def __repr__(self):
string = self._repr_helper()
return f'DiscFreqSurface({self.frequencies}, {self.amplitudes}{string})'
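# Standalone sketch of the amplitude/phase handling in __init__ above: a real
# amplitude A and phase phi are folded into the single complex amplitude
# A * exp(1j * phi) whose real part drives the profile.
def _complex_amp_demo():
    amp, phase = 2.0, np.pi / 2
    cplx = amp * np.exp(1j * phase)
    assert np.isclose(cplx.real, 0.0) and np.isclose(cplx.imag, 2.0)
    return cplx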
class ProbFreqSurface(_AnalyticalSurface):
"""
ProbFreqSurface(H, qr, qs)
Generates a surface with all possible frequencies in the fft represented
with amplitudes described by the probability distribution given as input.
Defaults to the parameters used in the contact mechanics challenge
This class only works for square 2D domains
For more information the definitions of the input parameters refer to
XXXXXX contact mechanics challenge paper
"""
is_discrete = False
surface_type = 'Random'
def __init__(self, h=2, qr=0.05, qs=10,
generate: bool = False, grid_spacing: float = None,
extent: tuple = None, shape: tuple = None):
self.h = h
self.qs = qs
self.qr = qr
super().__init__(grid_spacing=grid_spacing, extent=extent, shape=shape, generate=generate)
def rotate(self, radians: Number):
raise NotImplementedError("Probabilistic frequency surface cannot be rotated")
def shift(self, shift: tuple = None):
if shift is None:
return
raise NotImplementedError("Probabilistic frequency surface cannot be shifted")
def _height(self, x_mesh, y_mesh):
grid_spacing, extent, shape = check_coords_are_simple(x_mesh, y_mesh)
qny = np.pi / grid_spacing
u = np.linspace(0, qny, shape[0])
u_mesh, v_mesh = np.meshgrid(u, u)
freqs = np.abs(u_mesh + v_mesh)
variance = np.zeros(freqs.shape)
variance[np.logical_and((1 / freqs) > (1 / self.qr), (2 * np.pi / freqs) <= (extent[0]))] = 1
variance[np.logical_and((1 / freqs) >= (1 / self.qs), (1 / freqs) < (1 / self.qr))] = \
(freqs[np.logical_and(1 / freqs >= 1 / self.qs, 1 / freqs < 1 / self.qr)] / self.qr) ** (-2 * (1 + self.h))
fou_trans = np.reshape(np.array([np.random.normal() * var ** 0.5 for var in variance.flatten()]), freqs.shape)
return np.real(np.fft.ifft2(fou_trans))
def __repr__(self):
string = self._repr_helper()
return f'ProbFreqSurface(h={self.h}, qr={self.qr}, qs={self.qs}{string})'
class HurstFractalSurface(Surface):
r"""Hurst fractal surfaces
Parameters
----------
sigma: float
The RSM roughness of the surface
hurst_exponent: float
The hurst exponent, must be between 0 and 1, related to the fractal dimension by D = 3-H
roll_off_frequency: float, optional (0.0)
generate: bool, optional (False)
If true the surface profile is generated on instantiation, two of: grid_spacing, extent or shape must be set
grid_spacing: float, optional (None)
The grid spacing of the surface profile
extent: tuple, optional (None)
The overall surface dimensions in the x and y directions
shape: tuple, optional (None)
The number of grid points in the x and y directions, computation is faster for powers of 2
See Also
--------
ProbFreqSurface
RandomSurface
surface_like
Notes
-----
generates a hurst fractal surface with frequency components from q0 to
cut off frequency in even steps of q0.
amplitudes are given by:
q0 amplitude\*\*2 \*((h\*\*2+k\*\*2)/2)\*\*(1-Hurst parameter)
where h,k = \-N...N
where N=cut off frequency/ q0
phases are randomly generated on construction of the surface object,
repeated calls to the discretise function will discretise on the same surface
but repeated calls to this class will generate different realisations
References
----------
A new efficient numerical method for contact mechanics of rough surfaces
<NAME> <NAME>
Examples
--------
>>> #create the surface object with the specified fractal parameters
>>> my_surface=HurstFractalSurface(1,0.2,1000, shape=(128, 128), grid_spacing=0.01)
>>> #discretise the surface over a grid 1 unit by 1 unit with a grid_spacing of 0.01
>>> my_surface.discretise()
>>> my_surface.show()
"""
is_discrete = False
surface_type = "hurstFractal"
def __init__(self, sigma: float, hurst_exponent: float, roll_off_frequency: float = 0, generate: bool = False,
grid_spacing: float = None, extent: tuple = None, shape: tuple = None):
self.input_params = (sigma, hurst_exponent, roll_off_frequency)
if hurst_exponent > 1 or hurst_exponent < 0:
raise ValueError('Hurst exponent must be between 0 and 1')
self._hurst_exponent = hurst_exponent
self._sigma = sigma
self._roll_off_frequency = roll_off_frequency
super().__init__(grid_spacing=grid_spacing, extent=extent, shape=shape)
if generate:
self.discretise()
def discretise(self, return_new: bool = False):
"""Generate a new profile realisation, return a new surface if needed
Parameters
----------
return_new: bool
If True a new surface instance is returned, else the profile property of the current surface is set
Returns
-------
out: Surface
A new surface object with the profile set, will have the same shape and grid spacing as the current object
only returned if return_new is True
Notes
-----
As a side effect this will set the FFT and PSD properties of the discretised surface.
Copyright (c) 2016, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution
* Neither the name of Aalto University nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
References
----------
Code ported from the matlab version:
https://uk.mathworks.com/matlabcentral/fileexchange/60817-surface-generator-artificial-randomly-rough-surfaces?s_tid=mwa_osa_a
"""
if self.profile is not None and not return_new:
raise ValueError('Profile is already set, set the return_new argument to true to return a new surface '
'instance with the same fractal properties')
if self.shape is None or self.grid_spacing is None:
raise ValueError('Grid spacing and shape of the surface must be set before a hurst fractal can be '
'generated')
m, n = [s + s % 2 for s in self.shape]
grid_spacing = self.grid_spacing
hurst_exponent = self._hurst_exponent
sigma = self._sigma
qr = self._roll_off_frequency
lx, ly = [n_pts * grid_spacing for n_pts in [m, n]]
# make the wave vectors
qx = np.array([2 * np.pi / m * k for k in range(m)])
qx = np.unwrap((np.fft.fftshift(qx)) - 2 * np.pi) / grid_spacing
qy = np.array([2 * np.pi / n * k for k in range(n)])
qy = np.unwrap(( | np.fft.fftshift(qy) | numpy.fft.fftshift |
import numpy as np
import matplotlib.pyplot as plt
def plot_xy(x, y, ax=None):
if ax is None:
ax = plt.gca()
ax.scatter(x, y)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("Training data")
ax.grid(True)
def plot_abline(slope, intercept, xmin, xmax, ax=None):
if ax is None:
ax = plt.gca()
ax.plot([xmin, xmax], [xmin*slope + intercept, xmax*slope + intercept],
linewidth=3, color='red')
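# Closed-form least squares used by the class below, as a standalone sketch:
# slope w1 = cov(x, y) / var(x), intercept w0 = mean(y) - w1 * mean(x).
def _ols_demo():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = 2.0 * x + 1.0
    cov = np.cov(x, y, bias=True)
    w1 = cov[0, 1] / cov[0, 0]
    w0 = np.mean(y) - w1 * np.mean(x)
    assert np.isclose(w1, 2.0) and np.isclose(w0, 1.0)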
class SimpleOnlineLinearRegressor:
## keep track of sufficient statistics
def __init__(self):
self.N = 0
self.x_sum = 0
self.y_sum = 0
self.x_squared_sum = 0
self.y_squared_sum = 0
self.xy_sum = 0
self.w0 = 0
self.w1 = 0
self.sigma2 = 0
def predict(self, X):
return self.w0 + self.w1*X
def fit(self, X, y):
cov = np.cov(X,y,bias=True)
self.N = len(y)
self.w1 = cov[0,1]/cov[0,0]
self.w0 = | np.mean(y) | numpy.mean |
# Standard lib
import unittest
# 3rd party
import numpy as np
# Our own imports
from deep_hipsc_tracking import tracking
from .. import helpers
# Data
T1 = np.array([
[1.0, 2.0, 3.0, 4.0, 5.0],
[1.0, 2.0, 3.0, 4.0, 5.0],
]).T
T2 = np.array([
[1.1, 2.1, 3.1, 4.1],
[1.1, 2.1, 3.1, 4.1],
]).T
T3 = np.array([
[1.2, 2.2, 3.2, 4.2, 5.2],
[1.2, 2.2, 3.2, 4.2, 5.2],
]).T
T4 = np.array([
[1.3, 3.3, 4.3, 5.3],
[1.3, 3.3, 4.3, 5.3],
]).T
T5 = np.array([
[1.4, 2.4, 3.4, 5.4],
[1.4, 2.4, 3.4, 5.4],
]).T
TRACKS = [
(1, None, T1),
(2, None, T2),
(3, None, T3),
(4, None, T4),
(5, None, T5),
]
# Tests
class TestFindFlatRegions(unittest.TestCase):
def test_finds_flat_region_all_flat(self):
tt = np.linspace(0, 100, 100)
yy = tt * 2
res = tracking.find_flat_regions(tt, yy, interp_points=10, cutoff=10, noise_points=5)
exp = [np.ones((100, ), dtype=bool)]
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
def test_finds_flat_region_all_spikey(self):
tt = np.linspace(0, 100, 100)
yy = np.array([-100, 0, 100] * 50)
res = tracking.find_flat_regions(tt, yy, interp_points=5, cutoff=1, noise_points=1)
exp = []
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
def test_finds_flat_region_square_waves(self):
tt = np.linspace(0, 100, 100)
yy = np.array(([-100] * 10 + [100] * 10)*5)
res = tracking.find_flat_regions(tt, yy, interp_points=5, cutoff=1, noise_points=1)
exp = []
for i in range(0, 100, 10):
mask = np.zeros((100, ), dtype=bool)
if i == 0:
mask[i:i+8] = 1
elif i == 90:
mask[i+2:i+10] = 1
else:
mask[i+2:i+8] = 1
exp.append(mask)
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
class TestRollingFuncs(unittest.TestCase):
def test_rolling_rolling_window(self):
xp = np.array([1, 2, 3, 4, 5])
exp = np.array([2, 3, 4])
res = np.mean(tracking.rolling_window(xp, window=3), axis=-1)
np.testing.assert_almost_equal(res, exp)
exp = np.array([1.5, 2.5, 3.5, 4.5])
res = np.mean(tracking.rolling_window(xp, window=2), axis=-1)
np.testing.assert_almost_equal(res, exp)
exp = np.array([1.3333, 2, 3, 4, 4.6666])
res = np.mean(tracking.rolling_window(xp, window=3, pad='same'), axis=-1)
np.testing.assert_almost_equal(res, exp, decimal=3)
def test_interpolate_window(self):
xp = np.array([1, 2, 3, 4, 5])
yp = np.array([5, 4, 3, 2, 1])
x = np.array([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 5.5])
y = np.array([5.5, 5, 4.5, 4, 3.5, 3, 2.5, 2, 1, 0.5])
res = tracking.rolling_interp(x, xp, yp, 3)
np.testing.assert_almost_equal(y, res)
def test_slope_window(self):
xp = np.array([1, 2, 3, 4, 5])
yp = np.array([5, 4, 3, 2, 1])
x = np.array([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 5.5])
a = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])
res = tracking.rolling_slope(x, xp, yp, 3)
np.testing.assert_almost_equal(a, res)
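# One common way to build a rolling window like tracking.rolling_window
# (a hypothetical stand-in, not necessarily the library's implementation):
def _rolling_window_sketch(a, window):
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# np.mean(_rolling_window_sketch(np.array([1, 2, 3, 4, 5]), 3), axis=-1)
# reproduces the [2, 3, 4] expectation in test_rolling_rolling_window above.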
class TestMergePointsCluster(unittest.TestCase):
def test_merges_points_with_nans(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, np.nan],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_same_set(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points = tracking.tracking.merge_points_cluster(points1, points1, max_dist=0.1)
np.testing.assert_almost_equal(points, points1)
def test_merges_both_different(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_left(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_right(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_slight_motion(self):
points1 = np.array([
[0.0, 0.2],
[1.0, 1.2],
[2.0, 2.2],
[3.0, 3.2],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[4.0, 4.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.2)
exp_points = np.array([
[0.0, 0.15],
[1.0, 1.15],
[2.0, 2.2],
[3.0, 3.15],
[4.0, 4.1],
])
np.testing.assert_almost_equal(points, exp_points)
class TestMergePointsPairwise(unittest.TestCase):
def test_merges_same_set(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points1, max_dist=0.1)
np.testing.assert_almost_equal(points, points1)
def test_merges_both_different(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_left(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_right(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_slight_motion(self):
points1 = np.array([
[0.0, 0.2],
[1.0, 1.2],
[2.0, 2.2],
[3.0, 3.2],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[4.0, 4.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.2)
exp_points = np.array([
[0.0, 0.15],
[1.0, 1.15],
[3.0, 3.15],
[2.0, 2.2],
[4.0, 4.1],
])
np.testing.assert_almost_equal(points, exp_points)
class TestFindLinkFunctions(unittest.TestCase):
def test_finds_all_the_links(self):
res = tracking.find_link_functions()
exp = {'softassign', 'balltree', 'bipartite_match'}
self.assertEqual(set(res.keys()), exp)
class TestLinks(unittest.TestCase):
def test_to_padded_arrays(self):
tt = np.array([3, 5, 7, 9, 11])
xx = np.array([0, 1, 2, 3, 4])
yy = np.array([1, 2, 3, 4, 5])
chain = tracking.Link.from_arrays(tt, xx, yy)
nan = np.nan
in_tt = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
in_xx = np.array([nan, nan, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, nan])
in_yy = np.array([nan, nan, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, nan])
res_tt, res_xx, res_yy = chain.to_padded_arrays(min_t=1, max_t=13)
np.testing.assert_almost_equal(res_tt, in_tt)
np.testing.assert_almost_equal(res_xx, in_xx)
np.testing.assert_almost_equal(res_yy, in_yy)
in_tt = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
in_xx = np.array([0, 0, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4])
in_yy = np.array([1, 1, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5])
res_tt, res_xx, res_yy = chain.to_padded_arrays(min_t=1, max_t=13, extrapolate=True)
np.testing.assert_almost_equal(res_tt, in_tt)
np.testing.assert_almost_equal(res_xx, in_xx)
np.testing.assert_almost_equal(res_yy, in_yy)
def test_interpolate_chain_regular(self):
tt = np.array([3, 5, 7, 9, 11])
xx = np.array([0, 1, 2, 3, 4])
yy = np.array([1, 2, 3, 4, 5])
chain = tracking.Link.from_arrays(tt, xx, yy)
self.assertEqual(len(chain), 5)
chain.interpolate_points()
self.assertEqual(len(chain), 9)
tt = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11])
xx = np.array([0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])
yy = np.array([1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
exp_chain = tracking.Link()
for t, x, y in zip(tt, xx, yy):
exp_chain.add(-1, t, x, y)
self.assertEqual(chain, exp_chain)
def test_interpolate_chain_irregular(self):
tt = np.array([3, 5, 9, 11])
xx = np.array([0, 1, 3, 4])
yy = np.array([1, 2, 4, 5])
chain = tracking.Link.from_arrays(tt, xx, yy)
self.assertEqual(len(chain), 4)
chain.interpolate_points()
self.assertEqual(len(chain), 9)
tt = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11])
xx = np.array([0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4])
yy = np.array([1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])
exp_chain = tracking.Link()
for t, x, y in zip(tt, xx, yy):
exp_chain.add(-1, t, x, y)
self.assertEqual(chain, exp_chain)
def test_chain_from_arrays(self):
tt = np.array([1, 3, 5, 7, 9])
xx = np.array([0, 1, 2, 3, 4])
yy = np.array([1, 2, 3, 4, 5])
chain = tracking.Link.from_arrays(tt, xx, yy)
exp_chain = tracking.Link()
for t, x, y in zip(tt, xx, yy):
exp_chain.add(-1, t, x, y)
self.assertEqual(chain, exp_chain)
def test_to_arrays(self):
tt = np.array([1, 3, 5, 7, 9])
xx = np.array([0, 1, 2, 3, 4])
yy = | np.array([1, 2, 3, 4, 5]) | numpy.array |
#!/usr/bin/env python
"""Module to drive ws2812 from SPI
Copyright 2021 <NAME>
SPDX Apache License 2.0
"""
import logging
import math
import re
import time
from ipaddress import IPv4Address, IPv6Address, ip_address
from pathlib import Path
from threading import BoundedSemaphore, Event, Thread
from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
import attr
import numpy as np # type: ignore
import pendulum
from cattr import Converter
from pendulum import DateTime
from spidev import SpiDev # type: ignore
logger = logging.getLogger(__name__)
def to_iso8601(datetime: DateTime) -> str:
return datetime.to_iso8601_string()
IPType = Union[IPv4Address, IPv6Address]
converter = Converter()
converter.register_unstructure_hook(Path, str)
converter.register_structure_hook(Path, lambda pathstr, _: Path(pathstr))
converter.register_unstructure_hook(DateTime, to_iso8601)
converter.register_structure_hook(DateTime, lambda isostr, _: pendulum.parse(isostr))
converter.register_unstructure_hook(IPv4Address, str)
converter.register_structure_hook(IPv4Address, lambda ipstr, _: ip_address(ipstr))
converter.register_unstructure_hook(IPv6Address, str)
converter.register_structure_hook(IPv6Address, lambda ipstr, _: ip_address(ipstr))
converter.register_structure_hook(IPType, lambda ipstr, _: ip_address(ipstr))
np.set_printoptions(formatter={"int": lambda i: f"{i:3}"})
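# Standalone sketch of the ws2812-over-SPI encoding used by the driver below:
# each ws2812 data bit becomes one SPI byte whose high-time encodes a "0" or
# a "1". The two byte patterns mirror the LED_ZERO / LED_ONE class constants
# defined further down.
def _encode_ws2812_byte(value: int) -> np.ndarray:
    led_zero, led_one = 0b1100_0000, 0b1111_1100  # local copies for the sketch
    bits = [(value >> (7 - i)) & 1 for i in range(8)]  # MSB first
    return np.array([led_one if b else led_zero for b in bits], dtype=np.uint8)
# e.g. _encode_ws2812_byte(0xFF) is eight LED_ONE bytes; 0x00 is eight LED_ZERO bytes.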
class SPIws2812BusNotFound(Exception):
pass
@attr.s
class SPIws2812Config:
spidev: Path = attr.ib()
@spidev.validator
def _check_spidev(self, attribute, value: Path):
if not value.exists():
raise ValueError(f"Path '{value}' does not exist")
num_leds: int = attr.ib()
@num_leds.validator
def _check_num_leds(self, attribute, value: int):
if value <= 0:
raise ValueError("num_leds must be an integer greater than one")
bus: int = attr.ib()
@bus.default
def _get_bus(self):
m = self.bus_cs_pattern.match(str(self.spidev))
if m:
return int(m.group(1))
else:
raise ValueError(
f"Failed to extract bus (first digit) from spidev '{self.spidev}'"
)
cs: int = attr.ib()
@cs.default
def _get_cs(self):
m = self.bus_cs_pattern.match(str(self.spidev))
if m:
return int(m.group(2))
else:
raise ValueError(
f"Failed to extract cs (second digit) from spidev '{self.spidev}'"
)
bus_cs_pattern: ClassVar[re.Pattern] = re.compile(r"/dev/spidev(\d+).(\d+)")
@attr.s
class SPIws2812:
"""Class to drive a string of ws2812 attached to a SPI bus.
Notes:
The MOSI line idles high. This causes the first LED to be green most of
the time as the MSB of the GRB 24 bit code is seen as a "1". Clear it out
by prepending a bus reset of RESET_BYTES_COUNT 0b0 bytes to the front of any
transmission.
This class uses RESET_BYTES_COUNT of 0b0 bytes + 24 bytes for each led,
8 green, 8 red and 8 blue.
Using numpy arrays should be fast as SpiDev.writebytes2 consumes them directly without
copying.
"""
spidev: SpiDev = attr.ib()
num_leds: int = attr.ib()
led_string_ones: np.ndarray = attr.ib()
led_string_zeros: np.ndarray = attr.ib()
tx_buf_clear: np.ndarray = attr.ib()
tx_buf: np.ndarray = attr.ib()
tx_thread: "Optional[SimpleTimer]" = attr.ib()
tx_thread_stop: Event = attr.ib()
tx_array_lock: BoundedSemaphore = attr.ib()
fps: int = attr.ib()
tx_array: "Optional[np.ndarray]" = attr.ib()
LED_ZERO: ClassVar[int] = 0b1100_0000 # ws2812 "0" 0.15385us * 2 "1's" = 0.308us
LED_ONE: ClassVar[int] = 0b1111_1100 # ws2812 "1" 0.15385us * 6 "1's" = 0.923us
RESET_BYTES_COUNT: ClassVar[int] = 42 # 51.7us of flatline output
class SimpleTimer(Thread):
"""Runs inside and is responsible for animations.
It accesses its parent to do things, which is rather
suboptimal but made creating it easier.
"""
def __init__(self, parent: "SPIws2812", *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
self.parent = parent
self.index = 0
def run(self):
while not self.parent.tx_thread_stop.wait(1 / self.parent.fps):
with self.parent.tx_array_lock:
if self.parent.tx_array is not None:
rows, _ = self.parent.tx_array.shape
if self.index >= rows:
self.index = 0
self.parent.write_array(self.parent.tx_array[self.index])
self.index += 1
@classmethod
def init_from_dict(cls, config_dict: "Dict[str, Any]") -> "SPIws2812":
config = converter.structure(config_dict, SPIws2812Config)
return cls.init((config.bus, config.cs), config.num_leds)
@classmethod
def init(cls, spi_bus_cs: "Tuple[int,int]", num_leds: int) -> "SPIws2812":
"""Initialize an instance of this class correctly from supplied info.
Use instead of SPIws2812()
Args:
spi_bus_cs: (bus, cs) - from /dev/spidev1.0 the bus is 1, and the cs is 0
so (1, 0)
num_leds: The number of leds in the string of ws2812 leds
Returns:
Fully initialized SPIws2812 class, ready to write
"""
spi = SpiDev()
try:
logger.debug("SPI BUS %d CS %s", spi_bus_cs[0], spi_bus_cs[1])
spi.open(spi_bus_cs[0], spi_bus_cs[1])
except OSError as e:
logger.error("Failed to open spidev", exc_info=e)
raise SPIws2812BusNotFound
spi.max_speed_hz = 6_500_000
spi.mode = 0b00
spi.lsbfirst = False
tx_unpacked_ones = np.full(num_leds * 24, SPIws2812.LED_ONE, dtype=np.uint8)
tx_unpacked_zeros = | np.full(num_leds * 24, SPIws2812.LED_ZERO, dtype=np.uint8) | numpy.full |
"""This module/class contains functionality for computing (and plotting) radial
velocities and creating reference spectra for extracted fluxes. This should
ideally remain independent of the extraction method, such that it does not
matter which spectrograph took the data, nor what "Spectrograph" object was
used for extraction.
Most of the code below has been moved from the script "test_rhea2_extract.py".
Work still needs to be done post-refactor to ensure function input and outputs
are sensible, their docstrings are informative and they follow the principles of
Object Oriented Programming - such as the Single Responsibility Principle (Along
with a general clean up of the code and comments, such as having the code meet
the python line length guidelines --> the main benefit of which is having
multiple editors open side by side on smaller screens)
TODO
1) Move extract method to either extract module or rhea
2) Try to separate calculation/processing of data from saving/loading/displaying
3) Tidy up inputs to functions (e.g. cull unnecessary input parameters)
4) Make create_ref_spect() output variances (Median Absolute Deviations)
5) Possibly have dark calibration (for both flats and science frames) in its own
method. This would clean up the existing extract method, removing the need
to check whether darks and flats had been passed in (or varying permutations
of each - e.g. in the case where some of the data has already been dark
corrected, such as the solar data)
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
import scipy.interpolate as interp
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import constants as const
import PyAstronomy.pyasl as pyasl
import opticstools as ot
import pdb
try:
import pyfits
except:
import astropy.io.fits as pyfits
class RadialVelocity():
"""A RadialVelocity object for calculating and plotting RVS and generating
reference spectra.
Unclear if the object needs to be initialised with any parameters at this
stage. Perhaps a file path?
"""
def __init__(self):
"""(Presently empty) constructor.
"""
pass
def rv_shift_resid(self, params, wave, spect, spect_sdev, spline_ref,
return_spect=False):
"""Find the residuals to a fit of a (subsampled)reference spectrum to an
observed spectrum.
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params: array-like
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectra
spect_sdev: float array
standard deviation of the input spectra.
spline_ref: InterpolatedUnivariateSpline instance
For interpolating the reference spectrum
return_spect: boolean
Whether to return the fitted spectrum or the residuals.
Returns
-------
resid: float array
The fit residuals
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
# Lets get this sign correct. A redshift (positive velocity) means that
# a given wavelength for the reference corresponds to a longer
# wavelength for the target, which in turn means that the target
# wavelength has to be interpolated onto shorter wavelengths for the
# reference.
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
if return_spect:
return fitted_spect
else:
return (fitted_spect - spect)/spect_sdev
def rv_shift_chi2(self, params, wave, spect, spect_sdev, spline_ref):
"""Find the chi-squared for an RV fit. Just a wrapper for rv_shift_resid,
so the docstring is cut and paste!
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params:
...
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
return_spect: boolean
Whether to return the fitted spectrum or the residuals.
Returns
-------
chi2:
The fit chi-squared
"""
return np.sum(self.rv_shift_resid(params, wave, spect, spect_sdev, spline_ref)**2)
def rv_shift_jac(self, params, wave, spect, spect_sdev, spline_ref):
r"""Explicit Jacobian function for rv_shift_resid.
This is not a completely analytic solution, but without it there seems to be
numerical instability.
The key equations are:
.. math::
f(x) = R\left[\lambda(x)\,(1 - p_0/c)\right] \times \exp(p_1 x^2 + p_2 x + p_3)
g(x) = (f(x) - d(x)) / \sigma(x)
\frac{dg}{dp_0}(x) \approx [f(x + 1\,\mathrm{m/s}) - f(x)] / \sigma(x)
\frac{dg}{dp_1}(x) = x^2 f(x) / \sigma(x)
\frac{dg}{dp_2}(x) = x f(x) / \sigma(x)
\frac{dg}{dp_3}(x) = f(x) / \sigma(x)
Parameters
----------
params: float array
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
Returns
-------
jac:
The Jacobian.
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
jac = np.empty( (ny,4) )
#The Jacobian is the derivative of fitted_spect/sdev with respect to
#p[0] through p[3]
jac[:,3] = fitted_spect/spect_sdev
jac[:,2] = fitted_spect*xx/spect_sdev
jac[:,1] = fitted_spect*xx**2/spect_sdev
jac[:,0] = (spline_ref(wave*(1.0 - (params[0] + 1.0)/const.c.si.value))*
norm - fitted_spect)/spect_sdev
return jac
def create_ref_spect(self, wave, fluxes, vars, bcors, rebin_fact=2,
gauss_sdev=1.0, med_cut=0.6,gauss_hw=7,threshold=100):
"""Create a reference spectrum from a series of target spectra.
The process is:
1) Re-grid the spectra into a rebin_fact times smaller wavelength grid.
2) The spectra are barycentrically corrected by linear interpolation. Note
that when used on a small data set, typically the spectra will be shifted by
many km/s. For an RV-stable star, the fitting process then needs to find the
opposite of this barycentric velocity.
3) Remove bad (i.e. low flux) files.
4) Median combine the spectra.
5) Convolve the result by a Gaussian to remove high spatial frequency noise. This
can be important when the reference spectrum is created from only a small
number of input spectra, and high-frequency noise can be effectively fitted to
itself.
Parameters
----------
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
rebin_fact: int
Factor by which to rebin.
gauss_sdev:
...
med_cut:
...
gauss_hw:
...
Returns
-------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
C = const.c.si.value
#Create arrays for our outputs.
wave_ref = np.empty( (nm,rebin_fact*ny + 2) )
ref_spect = np.empty( (nm,rebin_fact*ny + 2) )
#First, rebin everything, using opticstools.utils.regrid_fft
new_shape = (fluxes.shape[1],rebin_fact*fluxes.shape[2])
fluxes_rebin = np.empty( (fluxes.shape[0],fluxes.shape[1],
rebin_fact*fluxes.shape[2]) )
for i in range(nf):
fluxes_rebin[i] = ot.utils.regrid_fft(fluxes[i],new_shape)
#Create the final wavelength grid.
for j in range(nm):
wave_ref[j,1:-1] = np.interp(np.arange(rebin_fact*ny)/rebin_fact,
np.arange(ny),wave[j,:])
#Fill in the end wavelengths, including +/-100 km/s from the ends.
wave_ref[j,-2] = wave_ref[j,-3] + (wave_ref[j,-3]-wave_ref[j,-4])
wave_ref[j,0] = wave_ref[j,1] * (C + 1e5)/C
wave_ref[j,-1] = wave_ref[j,-2] * (C - 1e5)/C
#Barycentric correct. For a positive barycentric velocity, the observer is
#moving towards the star, which means that star is blue-shifted and the
#correct rest-frame spectrum is at longer wavelengths. The interpolation
#below shifts the spectrum to the red, as required.
for i in range(nf):
for j in range(nm):
# Awkwardly, we've extended the wavelength scale by 2 elements,
# but haven't yet extended the fluxes...
ww = wave_ref[j,1:-1]
fluxes_rebin[i,j] = np.interp(ww*(1-bcors[i]/C), ww[::-1],
fluxes_rebin[i,j,::-1])
#!!! New Code. This was already checked and makes no sense.
#Combine the spectra.
flux_meds = np.median(fluxes_rebin,axis=2)
flux_files = np.median(flux_meds,axis=1)
if med_cut > 0:
good_files = np.where(flux_files > med_cut*np.median(flux_files))[0]
else:
good_files = np.arange(len(flux_files), dtype=int)
flux_orders = np.median(flux_meds[good_files],axis=0)
flux_norm = fluxes_rebin.copy()
for g in good_files:
for j in range(nm):
flux_norm[g,j,:] /= flux_meds[g,j]
#pdb.set_trace()
#Create a median over files
flux_ref = np.median(flux_norm[good_files],axis=0)
#Multiply this by the median for each order
for j in range(nm):
flux_ref[j] *= flux_orders[j]
#Threshold the data whenever the flux is less than "threshold"
if (threshold > 0):
bad = flux_ref<2*threshold
flux_ref[bad] *= np.maximum(flux_ref[bad]-threshold,0)/threshold
# Create a Gaussian smoothing function for the reference spectrum. This
# is needed to prevent a bias to zero radial velocity, especially in the
# case of few data points.
gg = np.exp(-(np.arange(2*gauss_hw+1)-gauss_hw)**2/2.0/gauss_sdev**2)
gg /= np.sum(gg)
one_order = np.empty(flux_ref.shape[1] + 2*gauss_hw)
for j in range(nm):
one_order[gauss_hw:-gauss_hw] = flux_ref[j,:]
one_order[:gauss_hw] = one_order[gauss_hw]
one_order[-gauss_hw:] = one_order[-gauss_hw-1]
ref_spect[j,:] = np.convolve(one_order, gg,
mode='same')[gauss_hw-1:1-gauss_hw]
return wave_ref, ref_spect
def extract_spectra(self, files, extractor, star_dark=None, flat_files=None,
flat_dark=None, location=('151.2094','-33.865',100.0),
coord=None, do_bcor=True, ra_dec_hr=False):
"""Extract the spectrum from a file, given a dark file, a flat file and
a dark for the flat. The process is:
1) Dark correcting the data and the flat fields.
2) Computing (but not applying) Barycentric corrections.
3) Extracting the data and the flat fields using the extract module, to form
:math:`f_m(x)`, the flux for orders m and dispersion direction pixels x.
4) Normalising the flat fields, so that the median of each order is 1.0.
5) Dividing by the extracted flat field. Uncertainties from the flat field are
added in quadrature.
TODO: Not the neatest implementation, but should account for the fact that
there are no flats or darks for the ThAr frames. Might be worth tidying
up and making the implementation a little more elegant.
Parameters
----------
files: list of strings
One string for each file. CAn be on separate nights - a full
pathname should be given.
star_dark:
flat_files: list of strings.
One string for each star file. CAn be on separate nights - a full
pathname should be given.
flat_dark:
location: (lattitude:string, longitude:string, elevation:string)
The location on Earth where the data were taken.
coord: astropy.coordinates.sky_coordinate.SkyCoord
The coordinates of the observation site
do_bcor: boolean
Flag for whether to do barycentric correction
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
# Initialise list of return values
# Each index represents a single observation
fluxes = []
vars = []
dates = []
bcors = []
#!!! This is dodgy, as files and flat_files should go together in a dict
for ix,file in enumerate(files):
# Dark correct the science and flat frames
# Only if flat/darks have been supplied --> ThAr might not have them
# If not supplied, just use science/reference data
            try:
                # Dark correct science frames
                if star_dark is not None and len(star_dark) > 0:
                    data = pyfits.getdata(file) - star_dark
                else:
                    data = pyfits.getdata(file)
                # Dark correct flats
                if flat_files is not None and len(flat_files) > 0 and \
                        flat_dark is not None:
                    flat = pyfits.getdata(flat_files[ix]) - flat_dark
                elif flat_files is not None and len(flat_files) > 0:
                    flat = pyfits.getdata(flat_files[ix])
            except (TypeError, ValueError):
                print('Unable to calibrate file ' + file +
                      '. Check that the formats of the data arrays are '
                      'consistent.')
                print(pyfits.getdata(file).shape)
                if star_dark is not None:
                    print(star_dark.shape)
                continue
header = pyfits.getheader(file)
date = Time(header['JD'], format='jd', location=location)
dates.append(date)
# Determine the barycentric correction
if do_bcor:
if not coord:
# Depending on whether the RA and DEC is saved in hours or
# degrees, load and create a SkyCoord object
if ra_dec_hr:
ra_deg = float(header['RA'])*15
else:
ra_deg = float(header['RA'])
dec_deg = float(header['DEC'])
coord = SkyCoord(ra=ra_deg, dec=dec_deg, unit='deg')
if not location:
location=(float(header['LONG']), float(header['LAT']),
float(header['HEIGHT']))
#(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False)
#pdb.set_trace()
bcors.append(1e3*pyasl.helcorr(float(location[0]),
float(location[1]),location[2],coord.ra.deg,
coord.dec.deg,date.jd)[0] )
else:
bcors.append(0.0)
# Extract the fluxes and variance for the science and flat frames
print("Extracting spectra from file #", str(ix))
flux, var = extractor.one_d_extract(data=data, rnoise=20.0)
# Continue only when flats have been supplied
# Perform flat field correction and adjust variances
            if flat_files is not None and len(flat_files) > 0:
flat_flux, fvar = extractor.one_d_extract(data=flat,
rnoise=20.0)
for j in range(flat_flux.shape[0]):
                    medf = np.median(flat_flux[j])
                    # Normalise each flat order to a median of 1.0 (docstring
                    # step 4); the variance scaling below is a sketch of that
                    # step, with the exact propagation assumed.
                    flat_flux[j] /= medf
                    fvar[j] /= medf**2
"""
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
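# A minimal usage sketch for TensorMesh (illustrative values, not from the
# original module): a 2 m cube discretized with 4x4x4 cells of 0.5 m width.
# The attribute values follow directly from the definitions above.
#
# >>> hx = np.ones(4)*0.5
# >>> mesh = TensorMesh([hx, hx, hx], x0=np.array([0., 0., 0.]))
# >>> mesh.nC, mesh.nN
# (64, 125)
# >>> mesh.vol.sum()   # total volume: (4*0.5)**3 = 8.0
# 8.0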
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
    # After two wavelengths we can safely assume it is zero.
#
    # The air does not follow the concept of skin depth, as it is a wave rather
    # than diffusion. For this reason the factor `max_domain` restricts the
    # domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
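# A worked example (hedged sketch, values rounded): with freq=0.5 Hz and
# res=1 Ohm.m the skin depth is 503.3*sqrt(1/0.5) ~ 712 m, so the default
# pps=3 yields a minimum cell width of ~237 m, and the buffer criterion is
# two wavelengths, 4*pi*712 m ~ 8.9 km, travelled to the boundary and back.
#
# >>> hx, x0 = get_hx_h0(freq=0.5, res=1., domain=[-1000., 1000.], verb=0)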
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
limits in order to avoid too big lowest grids in the multigrid method.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
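# Example: every returned number is p * 2**n with p <= max_prime and
# n >= min_div, so the coarsest multigrid level stays small.
#
# >>> get_cell_numbers(128, max_prime=5, min_div=3)
# array([ 16,  24,  32,  40,  48,  64,  80,  96, 128])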
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
    get_hx_h0 : Get `hx` and `x0` for a flexible number of `nx` with
                given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
        location at or after `x1`. `x1` is restricted to `domain`. This will
        adjust `min_width` so that an integer number of cells fit between `x0`
        and `x1`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
    min_width = np.array(min_width, dtype=float)
#!/usr/bin/env python
import numpy
from idl_functions import hist_equal
from idl_functions import bytscl
def data_convert(scalar, dtype):
"""
Converts an input scalar to a given data type.
:param scalar:
An integer or floating point scalar, eg 6.6, 4.
:param dtype:
A string representing the desired data type, eg 'uint8'.
:return:
A scalar converted to the desired data type. If there is no
matching data type, then scalar will be returned as a float64
data type.
"""
instr = str(dtype)
return {'int8': numpy.int8(scalar),
'uint8': numpy.uint8(scalar),
'int16': numpy.int16(scalar),
'uint16': numpy.uint16(scalar),
'int32': numpy.int32(scalar),
'uint32': numpy.uint32(scalar),
            'int64': numpy.int64(scalar),
            'uint64': numpy.uint64(scalar),
            'float32': numpy.float32(scalar),
            'float64': numpy.float64(scalar),
            # The entries after 'int64' and the float64 fallback are
            # reconstructed from the docstring's stated behaviour.
            }.get(instr, numpy.float64(scalar))
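# Example usage (illustrative values):
# >>> data_convert(3.7, 'uint8')          # truncates -> numpy.uint8(3)
# >>> data_convert(3.7, 'no_such_dtype')  # falls back -> numpy.float64(3.7)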
#!/usr/bin/env python
#ADAPTED FROM
#https://github.com/bio-ontology-research-group/deepgoplus/blob/master/evaluate_deepgoplus.py
import numpy as np
import pandas as pd
import click as ck
from sklearn.metrics import classification_report
from sklearn.metrics.pairwise import cosine_similarity
import sys
from collections import deque
import time
import logging
from sklearn.metrics import roc_curve, auc, matthews_corrcoef
from scipy.spatial import distance
from scipy import sparse
import math
#from utils2 import FUNC_DICT, Ontology, NAMESPACES
from matplotlib import pyplot as plt
#copied from utils.py
from collections import deque, Counter
import warnings
#import pandas as pd
#import numpy as np
#from xml.etree import ElementTree as ET
#import math
BIOLOGICAL_PROCESS = 'GO:0008150'
MOLECULAR_FUNCTION = 'GO:0003674'
CELLULAR_COMPONENT = 'GO:0005575'
FUNC_DICT = {
'cc': CELLULAR_COMPONENT,
'mf': MOLECULAR_FUNCTION,
'bp': BIOLOGICAL_PROCESS}
NAMESPACES = {
'cc': 'cellular_component',
'mf': 'molecular_function',
'bp': 'biological_process'
}
EXP_CODES = set([
'EXP', 'IDA', 'IPI', 'IMP', 'IGI', 'IEP', 'TAS', 'IC',])
# 'HTP', 'HDA', 'HMP', 'HGI', 'HEP'])
CAFA_TARGETS = set([
'10090', '223283', '273057', '559292', '85962',
'10116', '224308', '284812', '7227', '9606',
'160488', '237561', '321314', '7955', '99287',
'170187', '243232', '3702', '83333', '208963',
'243273', '44689', '8355'])
def is_cafa_target(org):
return org in CAFA_TARGETS
def is_exp_code(code):
return code in EXP_CODES
class Ontology(object):
def __init__(self, filename='data/go.obo', with_rels=False):
self.ont = self.load(filename, with_rels)
self.ic = None
def has_term(self, term_id):
return term_id in self.ont
def calculate_ic(self, annots):
cnt = Counter()
for x in annots:
cnt.update(x)
self.ic = {}
for go_id, n in cnt.items():
parents = self.get_parents(go_id)
if len(parents) == 0:
min_n = n
else:
min_n = min([cnt[x] for x in parents])
self.ic[go_id] = math.log(min_n / n, 2)
def get_ic(self, go_id):
if self.ic is None:
raise Exception('Not yet calculated')
if go_id not in self.ic:
return 0.0
return self.ic[go_id]
def load(self, filename, with_rels):
ont = dict()
obj = None
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line == '[Term]':
if obj is not None:
ont[obj['id']] = obj
obj = dict()
obj['is_a'] = list()
obj['part_of'] = list()
obj['regulates'] = list()
obj['alt_ids'] = list()
obj['is_obsolete'] = False
continue
elif line == '[Typedef]':
obj = None
else:
if obj is None:
continue
l = line.split(": ")
if l[0] == 'id':
obj['id'] = l[1]
elif l[0] == 'alt_id':
obj['alt_ids'].append(l[1])
elif l[0] == 'namespace':
obj['namespace'] = l[1]
elif l[0] == 'is_a':
obj['is_a'].append(l[1].split(' ! ')[0])
elif with_rels and l[0] == 'relationship':
it = l[1].split()
# add all types of relationships
obj['is_a'].append(it[1])
elif l[0] == 'name':
obj['name'] = l[1]
elif l[0] == 'is_obsolete' and l[1] == 'true':
obj['is_obsolete'] = True
if obj is not None:
ont[obj['id']] = obj
for term_id in list(ont.keys()):
for t_id in ont[term_id]['alt_ids']:
ont[t_id] = ont[term_id]
if ont[term_id]['is_obsolete']:
del ont[term_id]
for term_id, val in ont.items():
if 'children' not in val:
val['children'] = set()
for p_id in val['is_a']:
if p_id in ont:
if 'children' not in ont[p_id]:
ont[p_id]['children'] = set()
ont[p_id]['children'].add(term_id)
return ont
def get_anchestors(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
q = deque()
q.append(term_id)
while(len(q) > 0):
t_id = q.popleft()
if t_id not in term_set:
term_set.add(t_id)
for parent_id in self.ont[t_id]['is_a']:
if parent_id in self.ont:
q.append(parent_id)
return term_set
def get_parents(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
for parent_id in self.ont[term_id]['is_a']:
if parent_id in self.ont:
term_set.add(parent_id)
return term_set
def get_namespace_terms(self, namespace):
terms = set()
for go_id, obj in self.ont.items():
if obj['namespace'] == namespace:
terms.add(go_id)
return terms
def get_namespace(self, term_id):
return self.ont[term_id]['namespace']
def get_term_set(self, term_id):
if term_id not in self.ont:
return set()
term_set = set()
q = deque()
q.append(term_id)
while len(q) > 0:
t_id = q.popleft()
if t_id not in term_set:
term_set.add(t_id)
for ch_id in self.ont[t_id]['children']:
q.append(ch_id)
return term_set
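# A small usage sketch for Ontology (hedged; the path and GO ids below are
# illustrative):
# >>> go = Ontology('data/go.obo', with_rels=True)
# >>> 'GO:0008150' in go.get_anchestors('GO:0008150')  # includes the term itself
# True
# >>> go.calculate_ic([{'GO:0008150'}])  # must be called before get_ic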
def evaluate_deepgoplus(train_data_file, test_data_file, terms_file,
diamond_scores_file, gofile, ont, preds=None, export=False,evaluate=True,verbose=False):
go_rels = Ontology(gofile, with_rels=True)
if(isinstance(terms_file,list) or isinstance(terms_file,np.ndarray)):
terms = terms_file
else:
terms_df = pd.read_pickle(terms_file)
terms = terms_df['terms'].values.flatten()
terms_dict = {v: i for i, v in enumerate(terms)}
train_df = pd.read_pickle(train_data_file)
test_df = pd.read_pickle(test_data_file)
annotations = train_df['annotations'].values
annotations = list(map(lambda x: set(x), annotations))
test_annotations = test_df['annotations'].values
test_annotations = list(map(lambda x: set(x), test_annotations))
go_rels.calculate_ic(annotations + test_annotations)
    # Compute the IC values of the terms
ics = {}
for term in terms:
ics[term] = go_rels.get_ic(term)
prot_index = {}
for i, row in enumerate(train_df.itertuples()):
prot_index[row.proteins] = i
# BLAST Similarity (Diamond)
diamond_scores = {}
with open(diamond_scores_file) as f:
for line in f:
it = line.strip().split()
if it[0] not in diamond_scores:
diamond_scores[it[0]] = {}
diamond_scores[it[0]][it[1]] = float(it[2])
blast_preds = []
for i, row in enumerate(test_df.itertuples()):
annots = {}
prot_id = row.proteins
# BlastKNN
if prot_id in diamond_scores:
sim_prots = diamond_scores[prot_id]
allgos = set()
total_score = 0.0
for p_id, score in sim_prots.items():
allgos |= annotations[prot_index[p_id]]
total_score += score
allgos = list(sorted(allgos))
sim = np.zeros(len(allgos), dtype=np.float32)
for j, go_id in enumerate(allgos):
s = 0.0
for p_id, score in sim_prots.items():
if go_id in annotations[prot_index[p_id]]:
s += score
sim[j] = s / total_score
ind = np.argsort(-sim)
for go_id, score in zip(allgos, sim):
annots[go_id] = score
blast_preds.append(annots)
# DeepGOPlus
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
go_set.remove(FUNC_DICT[ont])
labels = test_df['annotations'].values
labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))
# print(len(go_set))
deep_preds = []
alphas = {NAMESPACES['mf']: 0.55, NAMESPACES['bp']: 0.59, NAMESPACES['cc']: 0.46}
for i, row in enumerate(test_df.itertuples()):
annots_dict = blast_preds[i].copy()
for go_id in annots_dict:
annots_dict[go_id] *= alphas[go_rels.get_namespace(go_id)]
for j, score in enumerate(row.preds if preds is None else preds[i]):
go_id = terms[j]
score *= 1 - alphas[go_rels.get_namespace(go_id)]
if go_id in annots_dict:
annots_dict[go_id] += score
else:
annots_dict[go_id] = score
deep_preds.append(annots_dict)
if(export):
export_cafa(test_df,deep_preds,"DeepGOPlus_1_all.txt")
if(evaluate):
print("Evaluating scores")
compute_prmetrics(labels,deep_preds,go_rels,ont=ont,verbose=verbose)
#aucs = compute_roc(labels,deep_preds)
#print("aucs:",aucs)
#print("mean aucs(predicted):",np.mean(aucs))
#print("mean aucs(all):",(np.sum(aucs)+(len(test_annotations)-len(aucs))*0.5)/len(test_annotations))
def evaluate(train_data_file, test_data_file, terms_file,
gofile, ont, preds=None, propagate_scores=False,export=False,evaluate=True,verbose=False):
'''
train_data_file: path to train_data.pkl
test_data_file: path to test_data.pkl
terms_file: path to terms.pkl or just a list or nparray of labels
'''
go_rels = Ontology(gofile, with_rels=True)
if(isinstance(terms_file,list) or isinstance(terms_file,np.ndarray)):
terms = terms_file
else:
terms_df = pd.read_pickle(terms_file)
terms = terms_df['terms'].values.flatten()
terms_dict = {v: i for i, v in enumerate(terms)}
train_df = pd.read_pickle(train_data_file)
test_df = pd.read_pickle(test_data_file)
annotations = train_df['annotations'].values
annotations = list(map(lambda x: set(x), annotations))
test_annotations = test_df['annotations'].values
test_annotations = list(map(lambda x: set(x), test_annotations))
go_rels.calculate_ic(annotations + test_annotations)
    # Compute the IC values of the terms
ics = {}
for term in terms:
ics[term] = go_rels.get_ic(term)
prot_index = {}
for i, row in enumerate(train_df.itertuples()):
prot_index[row.proteins] = i
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
go_set.remove(FUNC_DICT[ont])
labels = test_df['annotations'].values
labels = list(map(lambda x: set(filter(lambda y: y in go_set, x)), labels))
if(preds is None):
deep_preds = []
for i, row in enumerate(test_df.itertuples()):
annots_dict = {}
for j, score in enumerate(row.preds):
go_id = terms[j]
annots_dict[go_id] = score
deep_preds.append(annots_dict)
else:
deep_preds = [{terms[i]:y for i,y in enumerate(x)} for x in preds]
# Propagate scores (a la deepgo)
if(propagate_scores):
print("Propagating scores a la deepgo")
deepgo_preds = []
for annots_dict in deep_preds:
annots = {}
for go_id, score in annots_dict.items():
for a_id in go_rels.get_anchestors(go_id):
if a_id in annots:
annots[a_id] = max(annots[a_id], score)
else:
annots[a_id] = score
deepgo_preds.append(annots)
deep_preds = deepgo_preds
if(export):
export_cafa(test_df,deep_preds,"UDSMProt_1_all.txt")
# compute PR metrics
if(evaluate):
print("Evaluating scores")
compute_prmetrics(labels,deep_preds,go_rels,ont=ont,verbose=False)
#aucs = compute_roc(np.array(labels),np.array(deep_preds))
#print("aucs:",aucs)
#print("mean aucs(predicted):",np.mean(aucs))
#print("mean aucs(all):",(np.sum(aucs)+(len(test_annotations)-len(aucs))*0.5)/len(test_annotations))
def export_cafa(test_df,deep_preds,filename='UDSMProt_1_all.txt'):#all->9606 for human
print("exporting predictions to CAFA submission format")
txt_out=[]
txt_out.append('AUTHOR UDSMProt\n')
txt_out.append('MODEL 1\n')
txt_out.append('KEYWORDS natural language processing.\n')
for i, row in enumerate(test_df.itertuples()):
prot_id = row.proteins
for go_id, score in deep_preds[i].items():
#print(f'{prot_id}\t{go_id}\t{score:.2f}')
score_str = "{0:.2f}".format(score)
if(score_str!="0.00"):
txt_out.append(str(prot_id)+"\t"+str(go_id)+"\t"+score_str+"\n")
txt_out.append('END')
with open(filename, 'w') as f:
f.writelines(txt_out)
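# Each prediction line written above follows the CAFA submission format,
# tab-separated and preceded by the AUTHOR/MODEL/KEYWORDS header, e.g.
# (accession and score made up):
# P12345<TAB>GO:0005515<TAB>0.87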
def compute_prmetrics(labels,deep_preds,go_rels,ont="mf",verbose=False):
go_set = go_rels.get_namespace_terms(NAMESPACES[ont])
go_set.remove(FUNC_DICT[ont])
fmax = 0.0
tmax = 0.0
precisions = []
recalls = []
smin = 1000000.0
rus = []
mis = []
for t in range(0, 101):
threshold = t / 100.0
preds = []
for i in range(len(deep_preds)):
annots = set()
for go_id, score in deep_preds[i].items():
if score >= threshold:
annots.add(go_id)
new_annots = set()
for go_id in annots:
new_annots |= go_rels.get_anchestors(go_id)
preds.append(new_annots)
# Filter classes
preds = list(map(lambda x: set(filter(lambda y: y in go_set, x)), preds))
fscore, prec, rec, s, ru, mi, fps, fns = evaluate_annotations(go_rels, labels, preds)
avg_fp = sum(map(lambda x: len(x), fps)) / len(fps)
avg_ic = sum(map(lambda x: sum(map(lambda go_id: go_rels.get_ic(go_id), x)), fps)) / len(fps)
if(verbose):
print(f'{avg_fp} {avg_ic}')
precisions.append(prec)
recalls.append(rec)
print(f'Fscore: {fscore}, Precision: {prec}, Recall: {rec} S: {s}, RU: {ru}, MI: {mi} threshold: {threshold}')
if fmax < fscore:
fmax = fscore
tmax = threshold
if smin > s:
smin = s
print(f'Fmax: {fmax:0.3f}, Smin: {smin:0.3f}, threshold: {tmax}')
precisions = np.array(precisions)
recalls = np.array(recalls)
sorted_index = np.argsort(recalls)
recalls = recalls[sorted_index]
precisions = precisions[sorted_index]
    aupr = np.trapz(precisions, recalls)
    print(f'AUPR: {aupr:0.3f}')
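# compute_prmetrics sweeps thresholds t = 0.00, 0.01, ..., 1.00 and reports
# Fmax = max_t 2*p(t)*r(t)/(p(t)+r(t)) together with Smin, the minimum
# semantic distance sqrt(ru^2 + mi^2) over thresholds (the usual CAFA
# definitions, assuming evaluate_annotations follows them). For instance,
# p=0.6 and r=0.5 give F = 2*0.6*0.5/1.1 ~ 0.545 at that threshold.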
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The modelgen module provides classes for specifying designs for individual
subject analysis of task-based fMRI experiments. In particular it also includes
algorithms for generating regressors for sparse and sparse-clustered acquisition
experiments.
These functions include:
* SpecifyModel: allows specification of sparse and non-sparse models
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from copy import deepcopy
import os
from nibabel import load
import numpy as np
from scipy.special import gammaln
from nipype.interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath,
traits, File, Bunch, BaseInterfaceInputSpec,
isdefined)
from nipype.utils.filemanip import filename_to_list
from .. import config, logging
from nipype.external import six
iflogger = logging.getLogger('interface')
def gcd(a, b):
"""Returns the greatest common divisor of two integers
uses Euclid's algorithm
>>> gcd(4, 5)
1
>>> gcd(4, 8)
4
>>> gcd(22, 55)
11
"""
while b > 0: a, b = b, a % b
return a
def spm_hrf(RT, P=None, fMRI_T=16):
""" python implementation of spm_hrf
see spm_hrf for implementation details
% RT - scan repeat time
% p - parameters of the response function (two gamma
% functions)
% defaults (seconds)
% p(0) - delay of response (relative to onset) 6
% p(1) - delay of undershoot (relative to onset) 16
% p(2) - dispersion of response 1
% p(3) - dispersion of undershoot 1
% p(4) - ratio of response to undershoot 6
% p(5) - onset (seconds) 0
% p(6) - length of kernel (seconds) 32
%
% hrf - hemodynamic response function
% p - parameters of the response function
the following code using scipy.stats.distributions.gamma
doesn't return the same result as the spm_Gpdf function
hrf = gamma.pdf(u, p[0]/p[2], scale=dt/p[2]) - gamma.pdf(u, p[1]/p[3], scale=dt/p[3])/p[4]
>>> print spm_hrf(2)
[ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01
2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02
-3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02
-5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04
-1.46257507e-04]
"""
p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float)
if P is not None:
p[0:len(P)] = P
_spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h))
# modelled hemodynamic response function - {mixture of Gammas}
dt = RT/float(fMRI_T)
u = np.arange(0, int(p[6]/dt+1)) - p[5]/dt
hrf = _spm_Gpdf(u, p[0]/p[2], dt/p[2]) - _spm_Gpdf(u, p[1]/p[3], dt/p[3])/p[4]
idx = np.arange(0, int((p[6]/RT)+1))*fMRI_T
hrf = hrf[idx]
hrf = hrf/np.sum(hrf)
return hrf
def orth(x_in, y_in):
"""Orthoganlize y_in with respect to x_in
>>> err = np.abs(np.array(orth([1, 2, 3],[4, 5, 6]) - np.array([1.7142857142857144, 0.42857142857142883, -0.85714285714285676])))
>>> all(err<np.finfo(float).eps)
True
"""
x = np.array(x_in)[:, None]
y = np.array(y_in)[:, None]
y = y - np.dot(x, np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y)))
if np.linalg.norm(y, 1) > np.exp(-32):
y = y[:, 0].tolist()
else:
y = y_in
return y
def scale_timings(timelist, input_units, output_units, time_repetition):
"""Scales timings given input and output units (scans/secs)
Parameters
----------
timelist: list of times to scale
input_units: 'secs' or 'scans'
output_units: Ibid.
time_repetition: float in seconds
"""
if input_units==output_units:
_scalefactor = 1.
if (input_units == 'scans') and (output_units == 'secs'):
_scalefactor = time_repetition
if (input_units == 'secs') and (output_units == 'scans'):
_scalefactor = 1./time_repetition
timelist = [np.max([0., _scalefactor*t]) for t in timelist]
return timelist
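# Example: converting onsets given in scans to seconds with TR = 2.5 s:
# >>> scale_timings([2, 4], 'scans', 'secs', 2.5)
# [5.0, 10.0]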
def gen_info(run_event_files):
"""Generate subject_info structure from a list of event files
"""
info = []
for i, event_files in enumerate(run_event_files):
runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[])
for event_file in event_files:
_, name = os.path.split(event_file)
if '.run' in name:
name, _ = name.split('.run%03d'%(i+1))
elif '.txt' in name:
name, _ = name.split('.txt')
runinfo.conditions.append(name)
event_info = np.atleast_2d(np.loadtxt(event_file))
runinfo.onsets.append(event_info[:, 0].tolist())
if event_info.shape[1] > 1:
runinfo.durations.append(event_info[:, 1].tolist())
else:
runinfo.durations.append([0])
if event_info.shape[1] > 2:
runinfo.amplitudes.append(event_info[:, 2].tolist())
else:
delattr(runinfo, 'amplitudes')
info.append(runinfo)
return info
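# gen_info expects one event file per condition and run, named like
# 'Words.run001.txt', containing 1-3 whitespace-separated columns
# (onset [duration [amplitude]]); an illustrative file body:
#
# 2.0  1.0
# 50.0 1.0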
class SpecifyModelInputSpec(BaseInterfaceInputSpec):
subject_info = InputMultiPath(Bunch, mandatory=True, xor=['subject_info', 'event_files'],
desc=("Bunch or List(Bunch) subject specific condition information. "
"see :ref:`SpecifyModel` or SpecifyModel.__doc__ for details"))
event_files = InputMultiPath(traits.List(File(exists=True)), mandatory=True,
xor=['subject_info', 'event_files'],
desc=('list of event description files 1, 2 or 3 column format '
'corresponding to onsets, durations and amplitudes'))
realignment_parameters = InputMultiPath(File(exists=True),
desc = "Realignment parameters returned by motion correction algorithm",
copyfile=False)
outlier_files = InputMultiPath(File(exists=True),
desc="Files containing scan outlier indices that should be tossed",
copyfile=False)
functional_runs = InputMultiPath(traits.Either(traits.List(File(exists=True)),
File(exists=True)),
mandatory=True,
desc="Data files for model. List of 4D files or list of" \
"list of 3D files per session",
copyfile=False)
input_units = traits.Enum('secs', 'scans', mandatory=True,
desc = "Units of event onsets and durations (secs or scans)" \
"Output units are always in secs")
high_pass_filter_cutoff = traits.Float(mandatory=True,
desc="High-pass filter cutoff in secs")
time_repetition = traits.Float(mandatory=True,
desc = "Time between the start of one volume to the start of " \
"the next image volume.")
# Not implemented yet
#polynomial_order = traits.Range(0, low=0,
# desc ="Number of polynomial functions to model high pass filter.")
class SpecifyModelOutputSpec(TraitedSpec):
session_info = traits.Any(desc="session info for level1designs")
class SpecifyModel(BaseInterface):
"""Makes a model specification compatible with spm/fsl designers.
The subject_info field should contain paradigm information in the form of
a Bunch or a list of Bunch. The Bunch should contain the following
information::
[Mandatory]
- conditions : list of names
- onsets : lists of onsets corresponding to each condition
- durations : lists of durations corresponding to each condition. Should be left to a single 0 if all events are being modelled as impulses.
[Optional]
- regressor_names : list of str
list of names corresponding to each column. Should be None if
automatically assigned.
- regressors : list of lists
values for each regressor - must correspond to the number of
volumes in the functional run
- amplitudes : lists of amplitudes for each event. This will be ignored by
SPM's Level1Design.
The following two (tmod, pmod) will be ignored by any Level1Design class
other than SPM:
- tmod : lists of conditions that should be temporally modulated. Should
default to None if not being used.
- pmod : list of Bunch corresponding to conditions
- name : name of parametric modulator
- param : values of the modulator
- poly : degree of modulation
Alternatively, you can provide information through event files.
The event files have to be in 1, 2 or 3 column format with the columns
corresponding to Onsets, Durations and Amplitudes and they have to have the
name event_name.runXXX... e.g.: Words.run001.txt. The event_name part will
be used to create the condition names.
Examples
--------
>>> from nipype.interfaces.base import Bunch
>>> s = SpecifyModel()
>>> s.inputs.input_units = 'secs'
>>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii']
>>> s.inputs.time_repetition = 6
>>> s.inputs.high_pass_filter_cutoff = 128.
>>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \
Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
>>> s.inputs.subject_info = info
Using pmod:
>>> info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 50],[100, 180]], durations=[[0],[0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), None]), \
Bunch(conditions=['cond1', 'cond2'], onsets=[[20, 120],[80, 160]], durations=[[0],[0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), None])]
>>> s.inputs.subject_info = info
"""
input_spec = SpecifyModelInputSpec
output_spec = SpecifyModelOutputSpec
def _generate_standard_design(self, infolist,
functional_runs=None,
realignment_parameters=None,
outliers=None):
""" Generates a standard design matrix paradigm given information about
each run
"""
sessinfo = []
output_units = 'secs'
if 'output_units' in self.inputs.traits():
output_units = self.inputs.output_units
for i, info in enumerate(infolist):
sessinfo.insert(i, dict(cond=[]))
if isdefined(self.inputs.high_pass_filter_cutoff):
                sessinfo[i]['hpf'] = float(self.inputs.high_pass_filter_cutoff)
if hasattr(info, 'conditions') and info.conditions is not None:
for cid, cond in enumerate(info.conditions):
sessinfo[i]['cond'].insert(cid, dict())
sessinfo[i]['cond'][cid]['name'] = info.conditions[cid]
sessinfo[i]['cond'][cid]['onset'] = scale_timings(info.onsets[cid],
self.inputs.input_units,
output_units,
self.inputs.time_repetition)
sessinfo[i]['cond'][cid]['duration'] = scale_timings(info.durations[cid],
self.inputs.input_units,
output_units,
self.inputs.time_repetition)
if hasattr(info, 'amplitudes') and info.amplitudes:
sessinfo[i]['cond'][cid]['amplitudes'] = info.amplitudes[cid]
if hasattr(info, 'tmod') and info.tmod and len(info.tmod)>cid:
sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid]
if hasattr(info, 'pmod') and info.pmod and len(info.pmod)>cid:
if info.pmod[cid]:
sessinfo[i]['cond'][cid]['pmod'] = []
for j, name in enumerate(info.pmod[cid].name):
sessinfo[i]['cond'][cid]['pmod'].insert(j,{})
sessinfo[i]['cond'][cid]['pmod'][j]['name'] = name
sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = info.pmod[cid].poly[j]
sessinfo[i]['cond'][cid]['pmod'][j]['param'] = info.pmod[cid].param[j]
sessinfo[i]['regress']= []
if hasattr(info, 'regressors') and info.regressors is not None:
for j, r in enumerate(info.regressors):
sessinfo[i]['regress'].insert(j, dict(name='', val=[]))
if hasattr(info, 'regressor_names') and info.regressor_names is not None:
sessinfo[i]['regress'][j]['name'] = info.regressor_names[j]
else:
sessinfo[i]['regress'][j]['name'] = 'UR%d'%(j+1)
sessinfo[i]['regress'][j]['val'] = info.regressors[j]
sessinfo[i]['scans'] = functional_runs[i]
if realignment_parameters is not None:
for i, rp in enumerate(realignment_parameters):
mc = realignment_parameters[i]
for col in range(mc.shape[1]):
colidx = len(sessinfo[i]['regress'])
sessinfo[i]['regress'].insert(colidx, dict(name='', val=[]))
sessinfo[i]['regress'][colidx]['name'] = 'Realign%d'%(col+1)
sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist()
if outliers is not None:
for i, out in enumerate(outliers):
numscans = 0
for f in filename_to_list(sessinfo[i]['scans']):
                    shape = load(f).shape
if len(shape) == 3 or shape[3] == 1:
iflogger.warning("You are using 3D instead of 4D files. Are you sure this was intended?")
numscans += 1
else:
numscans += shape[3]
for j, scanno in enumerate(out):
colidx = len(sessinfo[i]['regress'])
sessinfo[i]['regress'].insert(colidx, dict(name='', val=[]))
sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d'%(j+1)
sessinfo[i]['regress'][colidx]['val'] = np.zeros((1, numscans))[0].tolist()
sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1
return sessinfo
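    # The returned session_info is a list with one dict per run, shaped
    # roughly like this (illustrative values):
    # [{'hpf': 128.0,
    #   'cond': [{'name': 'cond1', 'onset': [2.0], 'duration': [1.0]}],
    #   'regress': [{'name': 'Realign1', 'val': [...]}],
    #   'scans': 'functional2.nii'}]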
def _generate_design(self, infolist=None):
"""Generate design specification for a typical fmri paradigm
"""
realignment_parameters = []
if isdefined(self.inputs.realignment_parameters):
for parfile in self.inputs.realignment_parameters:
realignment_parameters.append(np.loadtxt(parfile))
outliers = []
if isdefined(self.inputs.outlier_files):
for filename in self.inputs.outlier_files:
try:
outindices = np.loadtxt(filename, dtype=int)
except IOError:
outliers.append([])
else:
if outindices.size == 1:
outliers.append([outindices.tolist()])
else:
outliers.append(outindices.tolist())
if infolist is None:
if isdefined(self.inputs.subject_info):
infolist = self.inputs.subject_info
else:
infolist = gen_info(self.inputs.event_files)
self._sessinfo = self._generate_standard_design(infolist,
functional_runs=self.inputs.functional_runs,
realignment_parameters=realignment_parameters,
outliers=outliers)
def _run_interface(self, runtime):
"""
"""
self._sessioninfo = None
self._generate_design()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
if not hasattr(self, '_sessinfo'):
self._generate_design()
outputs['session_info'] = self._sessinfo
return outputs
class SpecifySPMModelInputSpec(SpecifyModelInputSpec):
concatenate_runs = traits.Bool(False, usedefault=True,
desc="Concatenate all runs to look like a single session.")
output_units = traits.Enum('secs', 'scans', usedefault=True,
desc = "Units of design event onsets and durations " \
"(secs or scans)")
class SpecifySPMModel(SpecifyModel):
"""Adds SPM specific options to SpecifyModel
adds:
- concatenate_runs
- output_units
Examples
--------
>>> from nipype.interfaces.base import Bunch
>>> s = SpecifySPMModel()
>>> s.inputs.input_units = 'secs'
>>> s.inputs.output_units = 'scans'
>>> s.inputs.high_pass_filter_cutoff = 128.
>>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii']
>>> s.inputs.time_repetition = 6
>>> s.inputs.concatenate_runs = True
>>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \
Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
>>> s.inputs.subject_info = info
"""
input_spec = SpecifySPMModelInputSpec
def _concatenate_info(self, infolist):
nscans = []
for i, f in enumerate(self.inputs.functional_runs):
if isinstance(f, list):
numscans = len(f)
elif isinstance(f, six.string_types):
img = load(f)
                numscans = img.shape[3]
else:
raise Exception('Functional input not specified correctly')
nscans.insert(i, numscans)
# now combine all fields into 1
# names, onsets, durations, amplitudes, pmod, tmod, regressor_names, regressors
infoout = infolist[0]
for i, info in enumerate(infolist[1:]):
#info.[conditions, tmod] remain the same
if info.onsets:
for j, val in enumerate(info.onsets):
if self.inputs.input_units == 'secs':
infoout.onsets[j].extend((np.array(info.onsets[j])+
self.inputs.time_repetition*sum(nscans[0:(i+1)])).tolist())
else:
infoout.onsets[j].extend((np.array(info.onsets[j])+sum(nscans[0:(i+1)])).tolist())
for j, val in enumerate(info.durations):
if len(val) > 1:
infoout.durations[j].extend(info.durations[j])
if hasattr(info, 'amplitudes') and info.amplitudes:
for j, val in enumerate(info.amplitudes):
infoout.amplitudes[j].extend(info.amplitudes[j])
if hasattr(info, 'pmod') and info.pmod:
for j, val in enumerate(info.pmod):
if val:
for key, data in enumerate(val.param):
infoout.pmod[j].param[key].extend(data)
if hasattr(info, 'regressors') and info.regressors:
#assumes same ordering of regressors across different
#runs and the same names for the regressors
for j, v in enumerate(info.regressors):
infoout.regressors[j].extend(info.regressors[j])
#insert session regressors
if not hasattr(infoout, 'regressors') or not infoout.regressors:
infoout.regressors = []
onelist = np.zeros((1, sum(nscans)))
onelist[0, sum(nscans[0:(i)]):sum(nscans[0:(i+1)])] = 1
infoout.regressors.insert(len(infoout.regressors), onelist.tolist()[0])
return [infoout], nscans
def _generate_design(self, infolist=None):
if not isdefined(self.inputs.concatenate_runs) or not self.inputs.concatenate_runs:
super(SpecifySPMModel, self)._generate_design(infolist=infolist)
return
if isdefined(self.inputs.subject_info):
infolist = self.inputs.subject_info
else:
infolist = gen_info(self.inputs.event_files)
concatlist, nscans = self._concatenate_info(infolist)
functional_runs = [filename_to_list(self.inputs.functional_runs)]
realignment_parameters = []
if isdefined(self.inputs.realignment_parameters):
realignment_parameters = []
for parfile in self.inputs.realignment_parameters:
mc = np.loadtxt(parfile)
if not realignment_parameters:
realignment_parameters.insert(0, mc)
else:
realignment_parameters[0] = np.concatenate((realignment_parameters[0], mc))
outliers = []
if isdefined(self.inputs.outlier_files):
outliers = [[]]
for i, filename in enumerate(self.inputs.outlier_files):
try:
out = np.loadtxt(filename, dtype=int)
except IOError:
out = np.array([])
if out.size>0:
if out.size == 1:
outliers[0].extend([(np.array(out)+sum(nscans[0:i])).tolist()])
else:
outliers[0].extend((np.array(out)+sum(nscans[0:i])).tolist())
self._sessinfo = self._generate_standard_design(concatlist,
functional_runs=functional_runs,
realignment_parameters=realignment_parameters,
outliers=outliers)
class SpecifySparseModelInputSpec(SpecifyModelInputSpec):
time_acquisition = traits.Float(0, mandatory=True,
desc = "Time in seconds to acquire a single image volume")
volumes_in_cluster = traits.Range(1, usedefault=True,
desc="Number of scan volumes in a cluster")
model_hrf = traits.Bool(desc="model sparse events with hrf")
stimuli_as_impulses = traits.Bool(True,
desc = "Treat each stimulus to be impulse like.",
usedefault=True)
use_temporal_deriv = traits.Bool(requires=['model_hrf'],
desc = "Create a temporal derivative in addition to regular regressor")
scale_regressors = traits.Bool(True, desc="Scale regressors by the peak",
usedefault=True)
scan_onset = traits.Float(0.0,
desc="Start of scanning relative to onset of run in secs",
usedefault=True)
save_plot = traits.Bool(desc='save plot of sparse design calculation ' \
'(Requires matplotlib)')
class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec):
sparse_png_file = File(desc='PNG file showing sparse design')
sparse_svg_file = File(desc='SVG file showing sparse design')
class SpecifySparseModel(SpecifyModel):
""" Specify a sparse model that is compatible with spm/fsl designers
References
----------
.. [1] Ghosh et al. (2009) OHBM http://dl.dropbox.com/u/363467/OHBM2009_HRF.pdf
Examples
--------
>>> from nipype.interfaces.base import Bunch
>>> s = SpecifySparseModel()
>>> s.inputs.input_units = 'secs'
>>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii']
>>> s.inputs.time_repetition = 6
>>> s.inputs.time_acquisition = 2
>>> s.inputs.high_pass_filter_cutoff = 128.
>>> s.inputs.model_hrf = True
>>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]), \
Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
>>> s.inputs.subject_info = info
"""
input_spec = SpecifySparseModelInputSpec
output_spec = SpecifySparseModelOutputSpec
def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans):
"""Generates a regressor for a sparse/clustered-sparse acquisition
"""
bplot = False
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
bplot=True
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
TR = np.round(self.inputs.time_repetition*1000) # in ms
if self.inputs.time_acquisition:
TA = np.round(self.inputs.time_acquisition*1000) # in ms
else:
TA = TR # in ms
nvol = self.inputs.volumes_in_cluster
SCANONSET = np.round(self.inputs.scan_onset*1000)
total_time = TR*(nscans-nvol)/nvol + TA*nvol + SCANONSET
SILENCE = TR-TA*nvol
        dt = TA/10.
durations = np.round(np.array(i_durations)*1000)
if len(durations) == 1:
durations = durations*np.ones((len(i_onsets)))
onsets = np.round(np.array(i_onsets)*1000)
dttemp = gcd(TA, gcd(SILENCE, TR))
if dt < dttemp:
if dttemp % dt != 0:
dt = gcd(dttemp, dt)
if dt < 1:
raise Exception("Time multiple less than 1 ms")
iflogger.info("Setting dt = %d ms\n" % dt)
npts = int(total_time/dt)
times = np.arange(0, total_time, dt)*1e-3
timeline = np.zeros((npts))
timeline2 = np.zeros((npts))
if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
hrf = spm_hrf(dt*1e-3)
reg_scale = 1.0
if self.inputs.scale_regressors:
            boxcar = np.zeros(int(50.*1e3/dt))
            if self.inputs.stimuli_as_impulses:
                boxcar[int(1.*1e3/dt)] = 1.0
                reg_scale = float(TA/dt)
            else:
                boxcar[int(1.*1e3/dt):int(2.*1e3/dt)] = 1.0
if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
response = np.convolve(boxcar, hrf)
reg_scale = 1./response.max()
iflogger.info('response sum: %.4f max: %.4f'%(response.sum(), response.max()))
iflogger.info('reg_scale: %.4f'%reg_scale)
for i, t in enumerate(onsets):
idx = int(t/dt)
if i_amplitudes:
if len(i_amplitudes)>1:
timeline2[idx] = i_amplitudes[i]
else:
timeline2[idx] = i_amplitudes[0]
else:
timeline2[idx] = 1
if bplot:
plt.subplot(4, 1, 1)
plt.plot(times, timeline2)
if not self.inputs.stimuli_as_impulses:
if durations[i] == 0:
durations[i] = TA*nvol
stimdur = np.ones((int(durations[i]/dt)))
timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)]
timeline += timeline2
timeline2[:] = 0
if bplot:
plt.subplot(4, 1, 2)
plt.plot(times, timeline)
if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf:
timeline = np.convolve(timeline, hrf)[0:len(timeline)]
if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
#create temporal deriv
timederiv = np.concatenate(([0], np.diff(timeline)))
if bplot:
plt.subplot(4, 1, 3)
plt.plot(times, timeline)
if isdefined(self.inputs.use_temporal_deriv) and self.inputs.use_temporal_deriv:
plt.plot(times, timederiv)
# sample timeline
timeline2 = np.zeros((npts))
reg = []
regderiv = []
        for i, trial in enumerate(np.arange(nscans)/nvol):
            # Sketch of the per-scan sampling (the loop body is reconstructed
            # by assumption): average the convolved timeline over each
            # volume's acquisition window to get one regressor value per scan.
            scanstart = int((SCANONSET + trial*TR + (i % nvol)*TA)/dt)
            scanidx = scanstart + np.arange(int(TA/dt))
            timeline2[scanidx] = np.max(timeline)
            reg.insert(i, np.mean(timeline[scanidx])*reg_scale)
            if isdefined(self.inputs.use_temporal_deriv) and \
                    self.inputs.use_temporal_deriv:
                regderiv.insert(i, np.mean(timederiv[scanidx])*reg_scale)
''' temporary : Use this to use the forked version of GPy'''
import sys
sys.path.insert(1, '/home/breux/GPy')
import numpy as np
import matplotlib.pyplot as plt
import GPy
'''
Gaussian Processes regression with censored data example using artificial data as in
"Gaussian Process Regression with Censored Data Using Expectation Propagation, <NAME>, <NAME>"
'''
def f(x):
return ((6.*x - 2.)**2)*np.sin(2.*(6.*x-2.))
def plotModels(xc, yc, xc2, yc2, m_tobit, m_normal, m_normalWithoutCensoredData):
x_gt = np.linspace(0,1,200)
y_gt = f(x_gt)
fig, ax = plt.subplots()
plt.title("Tobit GP model")
plt.plot(x_gt, y_gt, linestyle='-', color="r", label="GT")
plt.plot(xc, yc, linestyle='None', marker='+', markersize=10, color='k', label="Data")
m_tobit.plot_f(fignum=0, ax=ax)
plt.xlim([0, 1])
fig, ax = plt.subplots()
plt.title("Standart GP model")
plt.plot(x_gt, y_gt,linestyle='-', color="r", label="GT")
plt.plot(xc, yc, linestyle='None', marker='+', markersize=10, color='k', label="Data")
m_normal.plot_f(fignum=1, ax=ax)
plt.xlim([0,1])
fig, ax = plt.subplots()
plt.title("Standart ignoring censured data GP model")
plt.plot(x_gt, y_gt, linestyle='-', color="r", label="GT")
plt.plot(xc2, yc2, linestyle='None', marker='+', markersize=10, color='k', label="Data")
m_normalWithoutCensoredData.plot_f(fignum=2, ax=ax)
plt.xlim([0, 1])
plt.show()
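# Background for the comparison plotted above: in the Tobit model a censored
# observation contributes only the tail probability P(y <= l), i.e. a
# Gaussian CDF term Phi((l - f(x))/sigma), to the likelihood instead of the
# usual Gaussian density. That is why it can exploit the clipped points that
# the third model simply discards. (A summary of the cited paper, not
# original code.)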
def artificialExample():
''' Generate Data '''
np.random.seed(4)
n = 30
x = np.linspace(0,1,n)
y = f(x) + np.random.normal(0, np.sqrt(0.1), x.shape[0])
x = x.reshape((n,1))
l = -0.45 #-0.2265
lowerCensoredData = np.zeros((n,), dtype='int64')
lowerCensoredData_indexes = [idx for idx, val in np.ndenumerate(y) if val < l]
| np.put(lowerCensoredData, lowerCensoredData_indexes, 1) | numpy.put |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import capsule_distance
capsule = capsule_distance.Capsule(0.18, -0.5, 0.45)
#from matplotlib_scalebar.scalebar import ScaleBar
#plt.rcParams['axes.facecolor'] = "xkcd:spring green"#'black'
# plt.rcParams.update({
# "text.usetex": True,
# "font.family": "serif",
# "font.serif": ["Palatino"],
# })
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size' : 12})
#rc('text', usetex=True)
#from matplotlib.ticker import FormatStrFormatter
all_traces_rds = np.genfromtxt('../traces_dynamic_systematic_rds.csv',
delimiter=';')
all_traces_baseline = np.genfromtxt('../traces_dynamic_systematic_baseline.csv',
delimiter=';')
data = [all_traces_rds, all_traces_baseline]
fig, axes = plt.subplots(1, 2, sharey=True, subplot_kw={"adjustable": 'box'})  # 'box-forced' was removed in matplotlib 3.0
fig.subplots_adjust(wspace=0.025)
for m in [0, 1]:
all_traces = data[m]
traces_max_index = np.max(all_traces[:, 4])
#traces_list = [None]*traces_max_index
#for i in range(traces_max_index):
# this_trace_row_indices = (all_traces[:, 4] == i)
# traces_list[i] = all_traces[this_trace_row_indices, 0:4]
#for trace in traces_list:
# plt.plot(trace[])
trace_start_indices = [0]
for i in range(all_traces.shape[0]):
if len(trace_start_indices)-1 < all_traces[i, 4]:
trace_start_indices.append(i)
# re-arrange the data
trace_start_indices.append(all_traces.shape[0])
traces_ordered = np.empty([all_traces.shape[0]*2, 3])
write_start = 0
for i in range(10):
sub_range_alpha = range(trace_start_indices[i], trace_start_indices[i+1])
write_length = trace_start_indices[i+1] - trace_start_indices[i]
traces_ordered[write_start:(write_start+write_length), 0:2] = all_traces[sub_range_alpha, 0:2]
traces_ordered[write_start:(write_start+write_length), 2] = all_traces[sub_range_alpha, 4]
write_start += write_length
traces_ordered[write_start:(write_start+write_length), 0:2] = all_traces[sub_range_alpha, 2:4]
traces_ordered[write_start:(write_start+write_length), 2] = all_traces[sub_range_alpha, 4]
write_start += write_length
sub_range_omega = range(trace_start_indices[19 - i], trace_start_indices[20 - i])
write_length = trace_start_indices[20 - i] - trace_start_indices[19 - i]
traces_ordered[write_start:(write_start+write_length), 0:2] = all_traces[sub_range_omega, 0:2]
traces_ordered[write_start:(write_start+write_length), 2] = all_traces[sub_range_omega, 4]
write_start += write_length
traces_ordered[write_start:(write_start+write_length), 0:2] = all_traces[sub_range_omega, 2:4]
traces_ordered[write_start:(write_start+write_length), 2] = all_traces[sub_range_omega, 4]
write_start += write_length
ax = axes[m]
the_cmap = cm.coolwarm#cm.PiYG#cm.brg#cm.viridis#cm.plasma #cm.cool
ax.scatter(traces_ordered[:,0], traces_ordered[:,1], c=traces_ordered[:,2]/float(traces_max_index),
cmap=the_cmap, marker='o', lw=0.1, s=8, edgecolor='k')
ax.set_aspect("equal")
ax.set_xlim([-2.7, 3.2])
ax.set_ylim([-2.7, 3.2])
ax.plot([-2.7, 3.2], [0.0, 0.0], 'k--', linewidth=1)
ax.plot([0.0, 0.0], [-2.7, 3.2], 'k--', linewidth=1)
if m == 0:
shift_x = -5.0
shift_y = -5.0
ax.text(2.9+shift_x, 2.6+shift_y, "1 m")
ax.plot([2.75+shift_x,3.75+shift_x], [2.5+shift_y, 2.5+shift_y],'k', linewidth=1)
ax.plot([2.75+shift_x,2.75+shift_x], [2.45+shift_y, 2.55+shift_y],'k', linewidth=1)
ax.plot([3.75+shift_x,3.75+shift_x], [2.45+shift_y, 2.55+shift_y],'k', linewidth=1)
y_colorscale = | np.linspace(0.5, 1.75, 20) | numpy.linspace |
#!/usr/bin/env python3
import os, sys
import tensorflow as tf
import numpy as np
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' #all info
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #warning, error
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #error
def add_layer(inputs, in_size, out_size, activation_function=None):
Weights = tf.Variable(tf.random_normal([in_size, out_size]))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
Wx_plus_b = tf.matmul(inputs, Weights) + biases
keep_prob = 1
Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
x_data = | np.linspace(-1, 1, 3000) | numpy.linspace |
# Copyright 2020 <NAME>
# This file is licensed under the MIT license (see LICENSE.txt).
import numpy as np
import torch
from torch.utils import data
from asvtorch.src.settings.settings import Settings
from asvtorch.src.utterances.utterance_list import UtteranceList
from asvtorch.src.frontend.featureloaders.featureloader import FeatureLoader
def _get_clip_indices(utt_start, utt_end, batch_start, batch_end):
""" Cuts the parts of the utterance that do not fit into the batch window.
Arguments:
utt_start {int} -- start point of the utterance
utt_end {int} -- end point of the utterance
batch_start {int} -- start point of the batch window
batch_end {int} -- end point of the batch window
Returns:
(int, int), bool -- a tuple containing clipped start and end point of an utterance, the boolean flag is True if the end of the utterance is inside the batch window.
"""
if utt_end <= batch_start:
return None
if utt_start >= batch_end:
return None
start = 0
end = utt_end - utt_start
if utt_start < batch_start:
start = batch_start - utt_start
if utt_end > batch_end:
end = batch_end - utt_start
ends = utt_end <= batch_end
return (start, end), ends
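# Hedged sanity check of the clipping logic above (values are made up):
# an utterance spanning frames 10..30 against a batch window 20..40 keeps
# only its tail, and its end falls inside the window.
# >>> _get_clip_indices(10, 30, 20, 40)
# ((10, 20), True)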
class _Kaldi_Dataset(data.Dataset):
def __init__(self, dataset: UtteranceList):
self.dataset = dataset
frames_per_batch = Settings().posterior_extraction.batch_size_in_frames
break_points = dataset.get_breakpoints_after_frame_selection()
n_active_frames = break_points[-1]
n_batches = int( | np.ceil(n_active_frames / frames_per_batch) | numpy.ceil |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
def make_fig():
pass
def update(val):
amp = sld_width_upper.val
freq = sld_width_lower.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
def reset(event):
sld_width_lower.reset()
sld_width_upper.reset()
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
if __name__ == '__main__':
# The data we need for plotting
points = 1000
widths = | np.arange(100) | numpy.arange |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test_cont_break """
import numpy as np
import mindspore as ms
from mindspore import Tensor, context, nn, ms_function
from mindspore.nn import Cell
from mindspore.ops import operations as P
class WhileSubGraphParam(Cell):
def __init__(self):
super().__init__()
self.update = ms.Parameter(Tensor(1, ms.float32), "update")
def construct(self, x, y, z):
out1 = z
while x < y:
self.update = self.update + 1
out1 = out1 + 1
x = x + 1
return out1, self.update
def test_while_loop_phi():
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(0, ms.float32)
y = Tensor(10, ms.float32)
z = Tensor(100, ms.float32)
net = WhileSubGraphParam()
net(x, y, z)
class WhileSubGraphParam2(Cell):
def __init__(self):
super().__init__()
self.update = ms.Parameter(Tensor(1, ms.float32), "update")
def construct(self, x, y, z):
out1 = z
i = self.update
while x < y:
i = i + 1
out1 = out1 + 1
x = x + 1
return out1, self.update
def test_while_loop_phi_2():
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(0, ms.float32)
y = Tensor(10, ms.float32)
z = Tensor(100, ms.float32)
net = WhileSubGraphParam2()
net(x, y, z)
class WhileSubGraphParam3(Cell):
def __init__(self, initial_input_x):
super().__init__()
self.initial_input_x = initial_input_x
self.X = ms.Parameter(initial_input_x, name="parameter_x")
self.Y = ms.Parameter(self.initial_input_x, name="parameter_y")
def construct(self):
a = 0
while a < 3:
self.X = self.X + self.Y
a += 1
return self.X
def test_while_loop_phi_3():
context.set_context(mode=context.GRAPH_MODE)
x = Tensor(0, ms.float32)
net = WhileSubGraphParam3(x)
net()
class ControlMixedWhileIf(nn.Cell):
def __init__(self):
super().__init__()
self.assign = P.Assign()
self.var = ms.Parameter(ms.Tensor([1], ms.float32), name="var")
@ms_function
def construct(self, x, y, z, c2, c4):
out = self.assign(self.var, c4)
while x < c2:
y = self.assign(self.var, c4)
while y < c2 and x < c2:
if 2 * y < c2:
y = y + 2
else:
y = y + 1
out = out + y
z = self.assign(self.var, c4)
while z < c2:
z = z + 1
out = out + z
x = x + 1
out = out + x
while x < 2 * c2:
y = self.assign(self.var, c4)
x = x + 1
while y < c2:
z = self.assign(self.var, c4)
while z < c2:
z = z + 1
if x < c2:
y = y - 1
else:
y = y + 1
out = out + z
out = out + y
out = out + x
return out
def test_mixed_while_if():
context.set_context(mode=context.PYNATIVE_MODE)
x = np.array(2).astype(np.int32)
y = np.array(14).astype(np.int32)
z = | np.array(1) | numpy.array |
# Change: Modifying so that the ends are straight coarse bricks
import matplotlib.pyplot as plt
import numpy as np
from scipy import spatial
import csv
import os
def NodeGen2DV45(x0,xl,y0,yl,z0,elemLenX,elemLenY,numElemX,numElemY,shiftX,shiftY):
# Nodal coordinated
nodeX1=np.linspace(x0,xl,numElemX);
nodeY1=y0+np.zeros(np.shape(nodeX1)) #np.arange(0,specLenY+elemLenY,elemLenY);
#
nodeX2=np.linspace(x0+shiftX,xl-shiftX,numElemX-1);
nodeY2=y0+np.zeros(np.shape(nodeX2))+shiftY
#
# Create all nodes
count=1;
Node=np.array([[0,0,0,0]])
for j in range(0,int(numElemY)-1):
for i in range(0,len(nodeX1)):
Node=np.append(Node,[[int(count+i),nodeX1[i],nodeY1[i]+j*elemLenY,z0]],axis=0)
count=len(Node)
for i in range(0,len(nodeX2)):
Node=np.append(Node,[[int(count+i),nodeX2[i],nodeY2[i]+j*elemLenY,z0]],axis=0)
count=len(Node)
# last line
for i in range(0,len(nodeX1)):
Node=np.append(Node,[[int(count+i),nodeX1[i],nodeY1[i]+(j+1)*elemLenY,z0]],axis=0)
Node=Node[1:len(Node)]
return Node
def NodeGen2DV90(x0,xl,y0,yl,z0,elemLenX,elemLenY,numElemX,numElemY):
# Nodal coordinated
nodeX1=np.linspace(x0,xl,numElemX);
nodeY1=y0+np.zeros(np.shape(nodeX1)) #np.arange(0,specLenY+elemLenY,elemLenY)
# Create all nodes
count=1;
Node=np.array([[0,0,0,0]])
for j in range(0,int(numElemY)):
for i in range(0,len(nodeX1)):
Node=np.append(Node,[[int(count+i),nodeX1[i],nodeY1[i]+j*elemLenY,z0]],axis=0)
count=len(Node)
#
Node=Node[1:len(Node)]
elemLenX=nodeX1[1]-nodeX1[0]
return Node,elemLenX,elemLenY
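# Minimal hypothetical call of the generator above: a 3 x 3 node grid of
# unit cells on the plane z = 0 (all argument values are illustrative only).
# Node, dx, dy = NodeGen2DV90(0., 2., 0., 2., 0., 1., 1., 3, 3)
# Node holds 9 rows of [id, x, y, z]; dx and dy are the element edge lengths.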
def FindNodes(loc,Node):
NCorners=[[0,0,0,0]]
for i in range(len(loc)):
NCornersTmp=Node[(Node[:,1]==loc[i,0])]
NCornersTmp=NCornersTmp[(NCornersTmp[:,2]==loc[i,1])]
NCorners=np.append(NCorners,NCornersTmp, axis=0)
NCorners=NCorners[1:len(NCorners)]
return NCorners
def FindNodeRange(loc,Node,elemLenX,elemLenY):
loc=[loc[0]-1e-5,loc[1]-1e-5]
NCornersTmp=Node
NCornersTmp=Node[(Node[:,1]>=loc[0])]
NCornersTmp=NCornersTmp[(NCornersTmp[:,1]<=loc[0]+1.5*elemLenX)]
NCornersTmp=NCornersTmp[(NCornersTmp[:,2]>=loc[1])]
NCornersTmp=NCornersTmp[(NCornersTmp[:,2]<=loc[1]+1.5*elemLenY)]
return NCornersTmp
def FindBoundariesV2(Node,x0,xl,y0,yl,numElemX,numElemY):
# Find corners
#loc=np.array([[x0,y0],[xl,0],[0,yl],[xl,yl]])
loc=np.array([[x0,y0]])
NCorners= FindNodes(loc,Node)
# Find bottom edge
Xrange=np.linspace(x0,xl,numElemX)
Yrange=np.ones(np.shape(Xrange))*y0
loc=np.transpose(np.array([Xrange,Yrange]))
NBtmEdge= FindNodes(loc,Node)
# Find top edge
Xrange=np.linspace(x0,xl,numElemX)
Yrange=np.ones(np.shape(Xrange))*yl
loc=np.transpose(np.array([Xrange,Yrange]))
NTopEdge= FindNodes(loc,Node)
# Find left edge
Yrange=np.linspace(y0,yl,numElemY)
Xrange=np.ones(np.shape(Yrange))*x0
loc=np.transpose(np.array([Xrange,Yrange]))
NLeftEdge= FindNodes(loc,Node)
# Find right edge
Yrange=np.linspace(y0,yl,numElemY)
Xrange=np.ones(np.shape(Yrange))*xl
loc=np.transpose(np.array([Xrange,Yrange]))
NRightEdge= FindNodes(loc,Node)
NBoundary=np.append(NBtmEdge,NRightEdge,axis=0)
NBoundary=np.append(NBoundary,NTopEdge,axis=0)
NBoundary=np.append(NBoundary,NLeftEdge,axis=0)
return NCorners,NBtmEdge,NTopEdge,NLeftEdge,NRightEdge,NBoundary
def FindBoundaries(Node,specLenX,specLenY,elemLenX,elemLenY):
# Find corners
loc=np.array([[0,0],[specLenX,0],[0,specLenY],[specLenX,specLenY]])
NCorners= FindNodes(loc,Node)
# Find bottom edge
Xrange=np.arange(0,specLenX,elemLenX)
Yrange=np.ones(np.shape(Xrange))*0
loc=np.transpose(np.array([Xrange,Yrange]))
NBtmEdge= FindNodes(loc,Node)
# Find top edge
Xrange=np.arange(0,specLenX,elemLenX)
Yrange=np.ones(np.shape(Xrange))*specLenY
loc=np.transpose(np.array([Xrange,Yrange]))
NTopEdge= FindNodes(loc,Node)
# Find left edge
Yrange=np.arange(0,specLenY,elemLenY)
Xrange=np.ones(np.shape(Yrange))*0
loc=np.transpose(np.array([Xrange,Yrange]))
NLeftEdge= FindNodes(loc,Node)
# Find right edge
Yrange=np.arange(0,specLenY,elemLenY)
Xrange=np.ones(np.shape(Yrange))*specLenX
loc=np.transpose(np.array([Xrange,Yrange]))
NRightEdge= FindNodes(loc,Node)
NBoundary=np.append(NBtmEdge,NRightEdge,axis=0)
NBoundary=np.append(NBoundary,NTopEdge,axis=0)
NBoundary=np.append(NBoundary,NLeftEdge,axis=0)
return NCorners,NBtmEdge,NTopEdge,NLeftEdge,NRightEdge,NBoundary
def DefineElem2D45(Node,NBtmEdge,NTopEdge,NLeftEdge,NRightEdge,shiftX,shiftY):
A=spatial.cKDTree(Node[:,1:3])
# Find nearest
XYPnt=np.array([0.0,0.0])
ElemQuad=np.array([[0,0,0,0,0]])
ElemPyrd= | np.array([[0,0,0,0]]) | numpy.array |
# Imports here
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import os
import json
from torchvision import datasets, transforms, models
from PIL import Image
from torch.autograd import Variable
# Imports here
means = [0.485, 0.456, 0.406]
deviations = [0.229, 0.224, 0.225]
def parse_args():
parser = argparse.ArgumentParser(
description="Trains a network on a dataset of images and saves the model to a checkpoint")
parser.add_argument('data_dir', action="store", help='the data directory to work')
parser.add_argument('check_point', action="store", type=str, help='the save file to load')
parser.add_argument('--topk', default=3, type=int, help='the num to show of possible candidates from top')
parser.add_argument('--json_dir', default='./cat_to_name.json', type=str, help='select the json file path')
parser.add_argument('--gpu', action='store_true', help='set the gpu mode')
args = parser.parse_args()
return args
def process_image(image):
""" Scales, crops, and normalizes a PIL image for a PyTorch model,
# # TODO: Process a PIL image for use in a PyTorch model
# # TODO : First, resize the images where the shortest side is 256 pixels,
# # keeping the aspect ratio. This can be done with the thumbnail or resize methods
# resize_to = 256
# crop_to = 224
# # resize
# image_width, image_height = image.size
# shorter = image_width if image_width < image_height else image_height
# ratio = resize_to / shorter
# resized_width = int(image_width * ratio)
# resized_height = int(image_height * ratio)
# image = image.resize((resized_width, resized_height))
# image = image.crop((c / 2 for c in ((resized_width - crop_to), (resized_height - crop_to),
# (resized_width + crop_to), (resized_height + crop_to))))
# # TODO : Color channels of images are typically encoded as integers 0-255,
# # but the model expected floats 0-1.
# # You'll need to convert the values.
# # It's easiest with a Numpy array, which you can get from a PIL image like so np_image = np.array(pil_image).
# # 0-255 to 0-1
# image = np.array(image)
# image = image / 255.
# # Nomalization
# mean = np.array(means)
# std = np.array(deviations)
# image = (image - mean) / std
# # Transpose
# image = np.transpose(image, (2, 0, 1))
# return image.astype(np.float32)
"""
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return preprocess(image)
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array(means)
std = np.array(deviations)
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict(image_path, device, model, label_map, topk=5):
''' Implementation of the code to predict the class from an image file'''
image = Image.open(image_path)
image = process_image(image)
image = np.expand_dims(image, 0)
image = torch.from_numpy(image)
model.eval()
image = Variable(image).to(device)
logits = model.forward(image)
result = F.softmax(logits, dim=1)
top_probs, top_labels = result.cpu().topk(topk)
top_probs = top_probs.detach().numpy().tolist()[0]
top_labels = top_labels.detach().numpy().tolist()[0]
idx_to_class = {val: key for key, val in model.class_to_idx.items()}
top_labels = [idx_to_class[lab] for lab in top_labels]
top_flowers = [label_map[lab] for lab in top_labels]
return top_flowers, top_probs
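# Hypothetical usage sketch (model, cat_to_name and the image path are
# placeholders; checkpoint loading is assumed to have happened already):
# flowers, probs = predict('flowers/test/1/image.jpg',
#                          torch.device('cpu'), model, cat_to_name, topk=5)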
# TODO: Display an image along with the top 5 classes
def view_classify(image, ps, classes):
''' Function for viewing an image and it's predicted classes.
'''
num_classes = len(ps)
ps = np.array(ps)
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = | np.array([0.485, 0.456, 0.406]) | numpy.array |
# Automatically adapted for numpy.oldnumeric Aug 01, 2007 by
#!/usr/bin/env python
import cdms2
import numpy
import os
import sys
import basetest
class TestGenericGrids(basetest.CDMSBaseTest):
def testGenGrids2(self):
latb = [62.47686472, 69.70600048]
lonb = [102.87075526, 105.51598035]
fn = self.getDataFile('sampleCurveGrid4.nc')
s = fn("sample")
g = s.getGrid()
lat = g.getLatitude()
lon = g.getLongitude()
g2 = cdms2.createGenericGrid(lat, lon)
datalat = g2.getLatitude().getBounds()[22, 25]
datalon = g2.getLongitude().getBounds()[22, 25]
self.assertTrue(numpy.ma.allclose(datalat, latb))
self.assertTrue(numpy.ma.allclose(datalon, lonb))
def testGenGrids(self):
datb = numpy.array([693., 694., ])
latb = numpy.array([-26.67690036, -30.99890917, ])
lonb = numpy.array([92.41822415, 94.4512163, ])
f = self.getDataFile('sampleGenGrid3.nc')
# Slice a file variable on a curvilinear grid: by coordinates ...
samp = f['sample']
x = samp(lat=(-32, -25), lon=(90, 95))
self.assertFalse(not numpy.ma.allequal(x.data, datb))
grid = x.getGrid()
self.assertFalse(grid.shape != (2,))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
# ... and by index
y = samp[693:695]
self.assertFalse(not numpy.ma.allequal(y, datb))
grid = y.getGrid()
self.assertFalse(not (grid.shape == (2,)))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
#-------------------------------------------------------------
# Slice a TRANSIENT variable on a curvilinear grid: by coordinates ...
samp = f['sample']
x = samp(lat=(-32, -25), lon=(90, 95))
self.assertFalse(not numpy.ma.allequal(x.data, datb))
grid = x.getGrid()
self.assertFalse(grid.shape != (2,))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
# ... and by index
y = samp[693:695]
self.assertFalse(not numpy.ma.allequal(y, datb))
grid = y.getGrid()
self.assertFalse(not (grid.shape == (2,)))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
#-------------------------------------------------------------
# Computing with variables, coordinate variables
x2 = (9. / 5.) * x + 32.
lat2 = x2.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
#-------------------------------------------------------------
# Slice a coordinate variable, computation
latsamp = samp.getLatitude()
latx = latsamp(cell=(693, 694))
self.assertFalse(not numpy.ma.allclose(latx.data, latb, atol=1.e-5))
latx = latsamp[693:695]
self.assertFalse(not numpy.ma.allclose(latx.data, latb, atol=1.e-5))
latrad = latsamp * numpy.pi / 180.0
#-------------------------------------------------------------
f = self.getDataFile('cdtest14.xml')
# Slice a DATASET variable on a curvilinear grid: by coordinates ...
samp = f['sample']
x = samp(lat=(-32, -25), lon=(90, 95))
self.assertFalse(not numpy.ma.allequal(x.data, datb))
grid = x.getGrid()
self.assertFalse(grid.shape != (2,))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not numpy.ma.allclose(lon.data, lonb, atol=1.e-5))
# ... and by index
y = samp[693:695]
self.assertFalse(not numpy.ma.allequal(y, datb))
grid = y.getGrid()
self.assertFalse(not (grid.shape == (2,)))
lat = grid.getLatitude()
self.assertFalse(not numpy.ma.allclose(lat.data, latb, atol=1.e-5))
lon = grid.getLongitude()
self.assertFalse(not | numpy.ma.allclose(lon.data, lonb, atol=1.e-5) | numpy.ma.allclose |
from ..lennard_jones import lennard_jones_forces, lennard_jones_potential, all_lennard_jones_forces, all_lennard_jones_potential, lennard_jones
from ..cell_order import create_cell_order_3d, create_cell_order_2d, create_cell_order
import numpy as np
import numpy.testing as npt
"""Tests"""
def test_potential_1d_0():
potential = lennard_jones_potential(0, 1, epsilon=1, sigma=1)
potenital_ref = 0
npt.assert_equal(potential, potenital_ref)
def test_potential_3d_0():
potential = lennard_jones_potential(
np.array([1, 2, 3]), np.array([2, 2, 3]), epsilon=1, sigma=1)
potenital_ref = 0
npt.assert_equal(potential, potenital_ref)
def test_potential_3d():
potential = lennard_jones_potential(
np.array([0, 4, 3]), np.array([0, 0, 0]), epsilon=1, sigma=1)
potenital_ref = -0.00025598361
npt.assert_almost_equal(potential, potenital_ref)
test_potential_1d_0()
test_potential_3d_0()
test_potential_3d()
# test forces and directions
def test_force_attractive():
force = lennard_jones_forces(0, 5, epsilon=1, sigma=1)
force_ref = -0.00030716067 * -1
npt.assert_almost_equal(force, force_ref)
def test_force_zero():
force = lennard_jones_forces(0, 2**(1 / 6), epsilon=1, sigma=1)
force_ref = 0
npt.assert_almost_equal(force, force_ref)
def test_force_repulsive():
force = lennard_jones_forces(0, 1, epsilon=1, sigma=1)
force_ref = 24 * -1
npt.assert_almost_equal(force, force_ref)
def test_force_3d():
force = lennard_jones_forces(
np.array([0, 4, 3]), np.array([0, 0, 0]), epsilon=1, sigma=1)
force_ref = -0.00030716067 * np.array([0, 4, 3]) / 5
npt.assert_almost_equal(force, force_ref)
test_force_repulsive()
test_force_zero()
test_force_attractive()
test_force_3d()
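# Sanity note on the magnitudes asserted above: with epsilon = sigma = 1 the
# Lennard-Jones force reduces to F(r) = 24 * (2 * r**-13 - r**-7), so
# F(1) = 24 (repulsive), F(2**(1/6)) = 0 (potential minimum), and
# F(5) ~= -3.07e-4 (weakly attractive), matching the reference values.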
def test_forces_2d_1cell():
class TestNl(object):
def __init__(self):
self.head = np.array([1])
self.list = np.array([-1, 0])
cell_order = create_cell_order_2d(1, [1, 1])
test = TestNl()
particle_position_test = np.array([[3, 4], [0, 0]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, epsilon=1, sigma=1)
forces_ref = np.array(
[-0.00030716067 * np.array([3, 4]) / 5,
0.00030716067 * np.array([3, 4]) / 5])
npt.assert_almost_equal(forces, forces_ref)
def test_forces_2d():
class TestNl(object):
"""four cells", two particles"""
def __init__(self):
self.head = np.array([0, -1, -1, 1])
self.list = np.array([-1, -1])
cell_order = create_cell_order_2d(2, [4, 4])
test = TestNl()
particle_position_test = np.array([[0, 0], [3, 4]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, epsilon=1, sigma=1)
forces_ref = np.array(
[0.00030716067 * np.array([3, 4]) / 5,
- 0.00030716067 * np.array([3, 4]) / 5])
npt.assert_almost_equal(forces, forces_ref)
def test_forces_3d():
"""8 cells, two particles"""
class TestNl(object):
def __init__(self):
self.head = np.array([-1, -1, -1, -1, 0, -1, 1, -1])
self.list = np.array([-1, -1, -1, -1, -1, -1, -1, -1])
cell_order = create_cell_order(2, [4, 4, 4])
test = TestNl()
particle_position_test = np.array([[0, 0, 1.5], [3, 4, 1.5]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, epsilon=1, sigma=1)
forces_ref = np.array([0.00030716067 * np.array([3, 4, 0]) /
5, - 0.00030716067 * np.array([3, 4, 0]) / 5])
npt.assert_almost_equal(forces, forces_ref)
def test_forces_3d_three_particles():
"""8 cells, three particles, all in same octant"""
class TestNl(object):
def __init__(self):
self.head = np.array([2, -1, -1, -1, -1, -1, -1, -1])
self.list = np.array([-1, 0, 1])
cell_order = create_cell_order(5, [10, 10, 10])
test = TestNl()
particle_position_test = np.array([[0, 0, 0], [3, 4, 0], [4, 3, 0]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, r_cut=5, epsilon=1, sigma=1)
forces_ref = np.array([0.00030716067 * np.array([3, 4, 0]) / 5
+ 0.00030716067 *
np.array([4, 3, 0]) / 5, - 0.00030716067 *
np.array([3, 4, 0]) / 5
+ 1.5909902576697312 *
np.array([1, -1, 0]) / np.sqrt(2), -
0.00030716067 * np.array([4, 3, 0]) / 5
- 1.5909902576697312 * np.array([1, -1, 0]) / np.sqrt(2)])
npt.assert_almost_equal(forces, forces_ref)
def test_forces_3d_cutoff():
"""8 cells, three particles, all in different octants
and out off cuttoff radius"""
class TestNl(object):
def __init__(self):
self.head = np.array([0, -1, 1, -1, -1, -1, -1, 2])
self.list = np.array([-1, -1, -1])
cell_order = create_cell_order(5, [10, 10, 10])
test = TestNl()
particle_position_test = np.array([[0, 0, 0], [10, 10, 10], [0, 10, 0]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, r_cut=5, epsilon=1, sigma=1)
forces_ref = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
npt.assert_almost_equal(forces, forces_ref)
def test_forces_3d_three_particles_2():
"""8 cells, three particles, all in different octants"""
class TestNl(object):
def __init__(self):
self.head = np.array([0, 2, 1, -1, -1, -1, -1, -1])
self.list = np.array([-1, -1, -1])
cell_order = create_cell_order(5, [10, 10, 10])
test = TestNl()
particle_position_test = np.array([[2, 2, 2], [5, 6, 2], [6, 5, 2]])
forces = all_lennard_jones_forces(
particle_position_test, test, cell_order, r_cut=5, epsilon=1, sigma=1)
forces_ref = np.array([0.00030716067 * np.array([3, 4, 0]) / 5
+ 0.00030716067 *
np.array([4, 3, 0]) / 5, - 0.00030716067 *
np.array([3, 4, 0]) / 5
+ 1.5909902576697312 *
np.array([1, -1, 0]) / np.sqrt(2), -
0.00030716067 * np.array([4, 3, 0]) / 5
- 1.5909902576697312 * np.array([1, -1, 0]) / np.sqrt(2)])
npt.assert_almost_equal(forces, forces_ref)
test_forces_2d_1cell()
test_forces_2d()
test_forces_3d()
test_forces_3d_three_particles()
test_forces_3d_cutoff()
test_forces_3d_three_particles_2()
def test_potential_3d_three_particles():
"""8 cells, three particles, all in same octant"""
class TestNl(object):
def __init__(self):
self.head = np.array([2, -1, -1, -1, -1, -1, -1, -1])
self.list = np.array([-1, 0, 1])
cell_order = create_cell_order(5, [10, 10, 10])
test = TestNl()
particle_position_test = np.array([[0, 0, 0], [3, 4, 0], [4, 3, 0]])
potential = all_lennard_jones_potential(
particle_position_test, test, cell_order, r_cut=5, epsilon=1, sigma=1)
potential_ref = 2 * (-0.00025598361) - 0.4375
npt.assert_almost_equal(potential, potential_ref)
def test_potential_3d_three_particles_cutoff():
"""8 cells, three particles, all in different octants
and out off cuttoff radius."""
class TestNl(object):
def __init__(self):
self.head = np.array([0, 1, 2, -1, -1, -1, -1, -1])
self.list = np.array([-1, -1, -1])
cell_order = create_cell_order(5, [10, 10, 10])
test = TestNl()
particle_position_test = np.array([[0, 0, 0], [10, 0, 0], [0, 10, 0]])
potential = all_lennard_jones_potential(
particle_position_test, test, cell_order, r_cut=5, epsilon=1, sigma=1)
potential_ref = 0
npt.assert_almost_equal(potential, potential_ref)
def test_potential_3d_three_particles_2():
"""8 cells, three particles, all in different octants"""
class TestNl(object):
def __init__(self):
self.head = np.array([0, 2, 1, -1, -1, -1, -1, -1])
self.list = | np.array([-1, -1, -1]) | numpy.array |
import numpy as np
from experiments.GMM import GMM
from scipy.stats import multivariate_normal as normal_pdf
import os
file_path = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.abspath(os.path.join(file_path, os.pardir, os.pardir, os.pardir)) + "/data/"
### Gaussian Mixture Model experiment
def build_GMM_lnpdf(num_dimensions, num_true_components, prior_variance=1e3):
prior = normal_pdf(np.zeros(num_dimensions), prior_variance * np.eye(num_dimensions))
prior_chol = np.sqrt(prior_variance) * np.eye(num_dimensions)
target_mixture = GMM(num_dimensions)
for i in range(0, num_true_components):
this_cov = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
(num_dimensions, num_dimensions))
this_cov = this_cov.transpose().dot(this_cov)
this_cov += 1 * np.eye(num_dimensions)
this_mean = 100 * (np.random.random(num_dimensions) - 0.5)
target_mixture.add_component(this_mean, this_cov)
target_mixture.set_weights(np.ones(num_true_components) / num_true_components)
def target_lnpdf(theta, without_prior=False):
target_lnpdf.counter += 1
if without_prior:
return np.squeeze(target_mixture.evaluate(theta, return_log=True) - prior.logpdf(theta))
else:
return np.squeeze(target_mixture.evaluate(theta, return_log=True))
target_lnpdf.counter = 0
return [target_lnpdf, prior, prior_chol, target_mixture]
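# Hypothetical usage sketch (dimensions and component count are arbitrary;
# the mixture is built from random draws, so the value depends on the seed):
# target_lnpdf, prior, prior_chol, mixture = build_GMM_lnpdf(2, 3)
# print(target_lnpdf(np.zeros(2)))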
def build_GMM_lnpdf_autograd(num_dimensions, num_true_components):
import autograd.scipy.stats.multivariate_normal as normal_auto
from autograd.scipy.misc import logsumexp
import autograd.numpy as np
means = np.empty((num_true_components, num_dimensions))
covs = np.empty((num_true_components, num_dimensions, num_dimensions))
for i in range(0, num_true_components):
covs[i] = 0.1 * np.random.normal(0, num_dimensions, (num_dimensions * num_dimensions)).reshape(
(num_dimensions, num_dimensions))
covs[i] = covs[i].transpose().dot(covs[i])
covs[i] += 1 * np.eye(num_dimensions)
means[i] = 100 * ( | np.random.random(num_dimensions) | numpy.random.random |
import numpy as np
def prepareData(train, dev, embeddings):
'''
Build the vocabulary: we add to the vocabulary every word that appears
in the selected subset of embeddings (200000), plus 2 special tokens
for padding and unknown words.
'''
vocabulary = {}
vocabulary["PADDING"] = len(vocabulary)
vocabulary["UNKOWN"] = len(vocabulary)
# Embedding matrix for the vocabulary
embeddings_matrix = []
embeddings_matrix.append(np.zeros(300))
embeddings_matrix.append(np.random.uniform(-0.25, 0.25, 300))
for word in embeddings.wv.vocab:
vocabulary[word] = len(vocabulary)
# Build the embedding matrix at the same time
embeddings_matrix.append(embeddings[word])
train_idx = []
dev_idx = []
for sentence in train:
wordIndices = []
for word in sentence:
# If the word is in the vocabulary, use its index
if word in vocabulary:
wordIndices.append(vocabulary[word])
else:
#Padding
if word == "-":
wordIndices.append(vocabulary["PADDING"])
# Unknown word
else:
wordIndices.append(vocabulary["UNKOWN"])
train_idx.append(np.array(wordIndices))
for sentence in dev:
wordIndices = []
for word in sentence:
# If we have an embedding for the word
if word in vocabulary:
wordIndices.append(vocabulary[word])
else:
#Padding
if word == "-":
wordIndices.append(vocabulary["PADDING"])
# Unknown word
else:
wordIndices.append(vocabulary["UNKOWN"])
dev_idx.append(np.array(wordIndices))
return (train_idx, dev_idx, embeddings_matrix, vocabulary)
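# Hypothetical call, assuming a gensim-style word2vec model is already loaded:
# train_idx, dev_idx, emb_matrix, vocab = prepareData(train_tokens,
#                                                     dev_tokens, w2v_model)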
def prepareDataTest(data_test, vocabulary):
'''
Preparation of the test set.
'''
data_test_idx = []
for sentence in data_test:
wordIndices = []
for word in sentence:
# If we have an embedding for the word
if word in vocabulary:
wordIndices.append(vocabulary[word])
else:
#Padding
if word == "-":
wordIndices.append(vocabulary["PADDING"])
# Unknown word
else:
wordIndices.append(vocabulary["UNKOWN"])
data_test_idx.append( | np.array(wordIndices) | numpy.array |
from HelperClass2.MnistImageDataReader import *
from HelperClass2.NeuralNet_3_0 import *
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.optim import Adam
import torch.nn.init as init
import warnings
warnings.filterwarnings('ignore')
train_data_name = "../../Data/ch11.train.npz"
test_data_name = "../../Data/ch11.test.npz"
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.fc1 = nn.Linear(28*28, 64, bias=True)
self.fc2 = nn.Linear(64, 16, bias=True)
self.fc3 = nn.Linear(16, 10, bias=True)
def forward(self, x):
x = F.sigmoid(self.fc1(x))
x = F.sigmoid(self.fc2(x))
x = F.softmax(self.fc3(x))
return x
def _initialize_weights(self):
for m in self.modules():
print(m)
if isinstance(m, nn.Linear):
init.xavier_uniform_(m.weight, gain=1)
print(m.weight)
def metric(pred, label):
'''
:param pred: batch_size * num_classes, numpy array
:param label: [batch_size,]
:return: accuracy
'''
real_len = label.shape[0]
pred_y = np.argmax(pred, axis=1)
return sum(label == pred_y) / real_len
if __name__ == '__main__':
# reading data
dataReader = MnistImageDataReader(mode="vector")
dataReader.ReadData()
dataReader.NormalizeX()
dataReader.NormalizeY(NetType.MultipleClassifier, base=0)
dataReader.Shuffle()
dataReader.GenerateValidationSet(k=12)
# data preprocessing
dataReader.XTrain = np.reshape(dataReader.XTrain, [-1, 28 * 28])
dataReader.YTrain = np.argmax(dataReader.YTrain, axis=1)
dataReader.XDev = np.reshape(dataReader.XDev, [-1, 28 * 28])
dataReader.YDev = | np.argmax(dataReader.YDev, axis=1) | numpy.argmax |
#!/usr/bin/env python
#pip install opencv-python
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.datasets import load_digits
digits = datasets.load_digits()
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,accuracy_score
import numpy as np
import scipy
import cv2
from fractions import Fraction
from sklearn.utils.multiclass import unique_labels
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import validation_curve
def image2Digit(image):
#adjust size to 8*8
im_resized = scipy.misc.imresize(image,(8,8))
#RGB(3D) adjust to gray 1D
im_gray = cv2.cvtColor(im_resized, cv2.COLOR_BGR2GRAY)
# training data uses the 0-16 grayscale range, resolution scale 16/255
im_hex = Fraction(16,255) * im_gray
#reverse to black background and white number
im_reverse = 16 - im_hex
return im_reverse.astype(int)
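# Hypothetical usage: convert a BGR image of a handwritten digit into the
# 8x8, 0-16 grayscale layout that the sklearn digits estimators expect
# (the file name is a placeholder):
# digit = image2Digit(cv2.imread('five.png')).reshape(1, -1)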
#split training sets
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target, random_state=2)
#create
clf = LogisticRegression(penalty='l2')
#combine
clf.fit(Xtrain, ytrain)
#predict
ypred = clf.predict(Xtest)
#calculate the accuracy
accuracy = accuracy_score(ytest, ypred)
print("Recognition_accuracyLR:",accuracy)
from sklearn import svm
from sklearn.svm import SVC
clf = svm.SVC(gamma=0.001, C=100.)
x,y = digits.data[:-1], digits.target[:-1]
clf.fit(x,y)
#print('Prediction:', clf.predict(digits.data[-1:]))
#plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation="nearest")
#plt.show()
x = digits.data[:-1]
y = digits.target[:-1]
#split data into test and training set
train_x, test_x, train_y, test_y=train_test_split(x,y,test_size=0.30, random_state=42,stratify=y)
clf.fit(train_x, train_y)
y_predict = clf.predict(test_x)
accuracy_s = accuracy_score(test_y,y_predict)
print("Recognition_accuracySVC:",accuracy_s)
###https://chrisalbon.com/machine_learning/model_evaluation/plot_the_validation_curve/###
# Create feature matrix and target vector
X, y = digits.data, digits.target
# Create range of values for parameter
param_range = | np.arange(1, 250, 2) | numpy.arange |
"""
Created on Wed Nov 7 2018
@author: <NAME> <EMAIL>
<NAME> <EMAIL>
"""
import itertools
from sklearn.metrics import (log_loss, f1_score, accuracy_score, average_precision_score, precision_score,
recall_score, roc_auc_score, mean_squared_error, r2_score)
from sklearn.model_selection import learning_curve
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scikitplot as skplt
def print_metrics(y_t, y_pred_t, mode=''):
"""
Print metrics of various kind
Parameters
----------
y_t : array-like
    true labels
y_pred_t : array-like
    predicted labels
mode : string
    name of the data split, used only for display
"""
print('Model performance on the {} dataset:'.format(mode))
mse = mean_squared_error(y_t, y_pred_t)
logloss = log_loss(y_t, y_pred_t)
accuracy = accuracy_score(y_t, y_pred_t)
f1 = f1_score(y_t, y_pred_t)
precision_micro = precision_score(y_t, y_pred_t, average='micro')
precision_macro = precision_score(y_t, y_pred_t, average='macro')
avg_precision = average_precision_score(y_t, y_pred_t)
precision = precision_score(y_t, y_pred_t)
recall = recall_score(y_t, y_pred_t, average='binary')
auc = roc_auc_score(y_t, y_pred_t)
r2 = r2_score(y_t, y_pred_t)
print(' Metric {}'.format(mode.title()))
print('accuracy........... {0:8.4f}'.format(accuracy))
print('recall............. {0:8.4f}'.format(recall))
print('auc................ {0:8.4f}'.format(auc))
print('precision (p=0.5).. {0:8.4f}'.format(precision))
print('precision (avg).... {0:8.4f}'.format(avg_precision))
print('precision (micro).. {0:8.4f}'.format(precision_micro))
print('precision (macro).. {0:8.4f}'.format(precision_macro))
print('f1................. {0:8.4f}'.format(f1))
print('r2................. {0:8.4f}'.format(r2))
print('logloss............ {0:8.4f}'.format(logloss))
print('mse................ {0:8.4f}'.format(mse))
def plot_confusion_matrix(cm, classes,
normalize=False,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
Parameters
----------
cm : matrix
confusion matrix
classes : list
list of names of the classes
normalize : bool, optional
normalizes confusion matrix if True
cmap : optional
some plot object
default: plt.cm.Blues
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
title = 'Normalized Confusion Matrix'
else:
title = 'Confusion Matrix'
plt.title(title)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def print_confusion_matrix(cm, cr, label, mode=''):
"""
Print confusino matrix for binary classification
Parameters
----------
cm : matrix
confusion matrix
cr : string
label :
mode : optional
"""
print('Confusion matrix of the {} data set:\n'.format(mode))
print('confusion matrix: \n {} \n'.format(cm))
true_negative = cm[0, 0]
true_positive = cm[1, 1]
false_negative = cm[1, 0]
false_positive = cm[0, 1]
print('True labels:')
for i, j in zip(np.sum(cm, axis=1), label):
print('{} {:,}'.format(j, i))
print('')
print('Predicted labels:')
for i, j in zip(np.sum(cm, axis=0), label):
print('{} {:,}'.format(j, i))
total = true_negative + true_positive + false_negative + false_positive
accuracy = (true_positive + true_negative) / total
precision = true_positive / (true_positive + false_positive)
recall = true_positive / (true_positive + false_negative)
misclassification_rate = (false_positive + false_negative) / total
f1 = (2 * true_positive) / (2 * true_positive + false_positive + false_negative)
print('\n accuracy................. {0:.4f}'.format(accuracy))
print(' precision................ {0:.4f}'.format(precision))
print(' recall................... {0:.4f}'.format(recall))
print(' misclassification_rate... {0:.4f}'.format(misclassification_rate))
print(' f1....................... {0:.4f}\n'.format(f1))
print('classification report: \n {} \n '.format(cr))
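# Worked example of the formulas above with a made-up 2x2 confusion matrix
# (tn=50, fp=10, fn=5, tp=35): accuracy = 85/100 = 0.85,
# precision = 35/45 ~= 0.7778, recall = 35/40 = 0.875 and
# f1 = 70/85 ~= 0.8235.
# print_confusion_matrix(np.array([[50, 10], [5, 35]]),
#                        'classification-report placeholder', ['neg', 'pos'])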
def plot_learning_curve(estimator, title, x, y, ylim=None, cv=None,
n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curves.
Does not work with Keras/Tensorflow
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
x : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
train_sizes: array-like
e.g. np.linspace(.1, 1.0, 5)
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, x, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = | np.std(train_scores, axis=1) | numpy.std |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 6 04:46:16 2018
@author: Alankar
"""
import numpy as np
def cart_to_pol(x,y):
r = np.sqrt(x**2+y**2)
if (x>0 and y>0):
theta = np.arctan2(np.abs(y),np.abs(x))
elif(x>0 and y<0):
theta = -np.arctan2(np.abs(y),np.abs(x))
elif(x<0 and y>0):
theta = np.pi - np.arctan2(np.abs(y), | np.abs(x) | numpy.abs |
##-------------------------------------------
## function is to evalute the accuracy of detection result
## creaed by Xuesong(Ben) LI, on 22/1/2018
##-------------------------------------------
import numpy as np
from nms_3d import convert_to_list_points
from nms_3d import caculate_3d_overlap
def average_precsion(recall, precesion):
aveg_precision = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(recall >= t) == 0:
p = 0
else:
p = np.max(precesion[recall>=t])
aveg_precision = aveg_precision + p/11.
return aveg_precision
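# Hedged sanity check of the 11-point interpolation above: with precision
# fixed at 1.0 over the whole recall range the result should be 1.0
# (up to float rounding).
# >>> average_precsion(np.array([0., 0.5, 1.0]), np.array([1., 1., 1.]))
# 1.0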
def evaluation_3d( all_pred_boxes, all_gt_boxes, threshold = 0.5 ):
'''
description: process the prediction results and compute the accuracy
1. put all gt_boxes together, including the index
2. sort all_boxes according to the confidence
3. check every predicted box against the corresponding gt_box
4. sum the tp and fp counts over all the gt_boxes
input: all_pred_boxes and all_gt_boxes are lists.
'''
# change format of all_pred_boxes into type: index,l, w, h, theta, x, y, z, score
num_all_boxes = len(all_pred_boxes)
np_all_boxes = np.zeros([1 ,9])
for i in range(num_all_boxes):
temp_index = np.full((all_pred_boxes[i].shape[0],1), i)
temp_all_ = np.concatenate((temp_index, all_pred_boxes[i]), axis=1)
np_all_boxes = np.append( np_all_boxes, temp_all_, axis=0)
np_all_boxes = np.delete(np_all_boxes, 0, 0)
sorted_ind = np.argsort(-np_all_boxes[:,8])
sorted_scores = np.sort(-np_all_boxes[:,8])
# convertting the gt_box into coordinate list
num_gt = 0
all_gt_boxes_coordinate = []
# caculate the cooridnate list of gt_boxes
for j in range(num_all_boxes):
num_gt = num_gt + all_gt_boxes[j].shape[0]
all_gt_boxes_coordinate.append(convert_to_list_points(all_gt_boxes[j][:,1:8]))
num_ind = sorted_ind.shape[0]
tp = | np.zeros(num_ind) | numpy.zeros |
#%%
# For scientific computing
import numpy as np
import pandas as pd
import scipy.special
import phd.viz
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
colors, palette = phd.viz.phd_style()
# Variability in fold-change as parameter change.
def fold_change_oo(Ka, Ki, R, era, eai=4.5, Nns=4.6E6):
'''
computes the gene expression fold change for a simple repression architecture
in the limit where the inducer concentration goes to infinity
Parameters
----------
Ka, Ki : float.
Dissociation constants of the ligand to the active and inactive state
of the repressor respectively.
R : float.
Mean repressor copy number per cell
era : float.
Repressor-DNA binding energy
eai : float.
Energy difference between active and inactive state of repressor
Nns : float.
Number of non-specific binding sites.
Returns
-------
fold-change
'''
return (1 + 1 / (1 + np.exp(-eai) * (Ka / Ki)**2) * R / Nns *
np.exp(-era))**-1
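# Rough sanity evaluation (R = 260 repressors is a hypothetical copy number;
# the other values are the ones defined below): with era = -13.9 kBT,
# fold_change_oo(139.96, 0.54, 260, -13.9) comes out around 0.92, i.e.
# near-complete de-repression at saturating inducer.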
# Let us now define the numerical values for all the needed parameters
era_num = np.array([-15.3, -13.9, -9.7]) # kBT
Ka_num = 139.96 # µM
Ki_num = 0.54 # µM
# Let's now plot the change in fold-change as $K_A$ and $K_I$ vary for
# different energies and repressor copy numbers.
# Factor by which the Ka and Ki are varied
factor = 2
Ka_array = np.logspace(np.log10(Ka_num / factor),
np.log10(Ka_num * factor), 100)
Ki_array = np.logspace( | np.log10(Ki_num / factor) | numpy.log10 |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
    r = requests.get(check_complete)
    bar.update(1)
    elapsed = (i * 3) / 60
    if r.status_code == requests.codes.ok:
        bar.n = 400
        bar.last_print_n = 400
        bar.refresh()
        print('\nrequest completed in %f minutes.' % elapsed)
        break
    else:
        time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: list of the NetCDF data files in the catalog matching the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
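# Hypothetical end-to-end sketch (stream name, dates and regex are
# placeholders; a real request needs valid API credentials in AUTH):
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-01-31T00:00:00.000Z')
# files = M2M_Files(data, r'.*METBK.*\.nc$')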
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
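# FLORT (three-wavelength fluorometer): each branch below maps a
# (platform, node, method) combination onto a 'flort_sample' stream and
# declares time plus five optical products (scattering coefficient,
# chlorophyll-a, CDOM, volume scattering, backscatter); the CE09OSPM
# profiler branch also carries the interpolated CTD pressure.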
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
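# FDCHP (direct-covariance flux package): only the timestamp is declared
# for the telemetered stream; no flux products are requested here.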
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
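# DOSTA (dissolved oxygen): fixed-depth branches declare optode oxygen in
# umol/kg and umol/L; the CE09OSPM profiler branch instead uses the DOFST
# stream, which adds the raw sensor frequency (Hz) and CTD pressure.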
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
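# ADCP (acoustic Doppler current profiler): every branch shares the
# 'adcp_velocity_earth' stream layout, i.e. bin depths, attitude
# (heading/pitch/roll in deci-degrees) and east/north/up velocities in m/s.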
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
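# ZPLSC (bio-acoustic sonar): only the timestamp is declared for the
# telemetered and recovered-host streams; acoustic backscatter profiles
# are not requested here.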
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
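# WAVSS (surface wave spectra): each buoy branch declares the bulk wave
# statistics (heights in m, periods in sec, mean direction and spread in
# degrees) from the 'wavss_a_dcl_statistics' stream.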
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
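# VELPT (single-point velocity meter): buoy and NSIF branches share one
# layout of east/north/up velocities (m/s), attitude (deci-degrees), and
# raw scaled temperature (0.01degC) and pressure (0.001dbar).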
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
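# PCO2W (seawater pCO2): each branch declares the thermistor temperature
# (degC) and pCO2 (uatm) from the 'pco2w_abc_dcl_instrument' stream.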
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
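# PHSEN (seawater pH): each branch declares the thermistor temperature
# (degC) and the unitless pH from the 'phsen_abcdef_dcl_instrument' stream.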
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - Spectral Irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
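#Note: the SPKIR downwelling vector carries one value per wavelength
#channel (seven on the OCR-507), so var_list[1].data comes back 2-D
#(time x channel) rather than as a flat series.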
#PRESF - Seafloor Pressure
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - Pumped CTD
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
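#A minimal sketch (hypothetical helper name) of how these mappings are
#consumed downstream: uframe_dataset_name encodes the five fields of an
#OOI M2M request path, and the arrays declared above are filled from the
#response, e.g.:
#    site, node, sensor, method, stream = uframe_dataset_name.split('/')
#    ds = request_ooi_data(site, node, sensor, method, stream)  # hypothetical
#    for v in var_list:
#        v.data = np.array(ds[v.name])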
#VEL3D - 3-D Single Point Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK - 3-D Single Point Velocity Meter (profiler-mounted)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
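#CTDPF - Wire-Following Profiler CTD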
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A - pCO2 Air-Sea (surface partial pressure of CO2)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD - Photosynthetically Available Radiation
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA - Absorption Spectrophotometer
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - Nitrate (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
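#nitrate_concentration is the SUNA's uncorrected estimate, while
#salinity_corrected_nitrate re-fits the UV absorption with a
#temperature/salinity correction, so the two series can differ.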
##Recovered Host data streams
#MOPAK - 3-Axis Motion Pack
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK - Bulk Meteorology Instrument Package
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
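#The corrected wind components above are east/north values; a common
#derived quantity (sketch, assuming the arrays have been populated) is
#scalar wind speed:
#    wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)  # m/s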
#FLORT - 3-Wavelength Fluorometer (chlorophyll, CDOM, backscatter)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP - Direct Covariance Flux
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA - Dissolved Oxygen (optode)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
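#dissolved_oxygen (umol/kg) is the density-compensated data product; the
#optode's native concentration is per litre. Converting between the two
#needs in-situ density, e.g. (sketch):
#    do_umol_kg = do_umol_L / (rho_kg_m3 / 1000.0)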
#ADCP - Acoustic Doppler Current Profiler
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
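#WAVSS = surface wave spectra; these statistics streams carry time plus bulk wave parameters (zero-crossing count, wave heights, periods, and mean direction/spread).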
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
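#VELPT = single-point velocity meter; streams carry time, east/north/up velocities (m/s), heading/pitch/roll (deci-degrees), and raw temperature (0.01degC) and pressure (0.001dbar) counts.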
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
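#Note: this CE06ISSM BUOY case points at the RID16 (NSIF) reference designator; the DCL-parsed stream is kept commented out below and the plain instrument_recovered stream is used instead.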
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
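#PCO2W = seawater pCO2; streams carry time, the instrument thermistor temperature (degC), and pCO2 of seawater (uatm).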
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
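#PHSEN = seawater pH; streams carry time, thermistor temperature (degC), and the dimensionless pH product.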
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
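#SPKIR = multichannel spectral irradiance; streams carry time and the downwelling irradiance vector (uW cm-2 nm-1).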
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
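#PRESF = seafloor pressure; streams carry time, absolute seafloor pressure (dbar), and seawater temperature (degC). Only MFN nodes appear in this lookup.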
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
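#CTDBP = pumped CTD; streams carry time, temperature, practical salinity, density, pressure, and conductivity.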
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
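#VEL3D = 3-D single-point velocity meter; streams carry time, the three turbulent velocity components (m/s), and raw seawater pressure (0.001dbar). Only MFN nodes appear in this lookup.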
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
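#PCO2A = air-sea pCO2; streams carry time, seawater and atmospheric pCO2 (uatm), and the derived air-sea CO2 flux (mol m-2 s-1).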
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
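#OPTAA = optical absorption and attenuation; only the time coordinate is mapped here (the multispectral channels are not included in this lookup).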
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
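#NUTNR = nitrate (SUNA); streams carry time, raw nitrate concentration, and the salinity-corrected nitrate product (umol/L).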
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
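#CTD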
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
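#ADCP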
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
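#ZPLSC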
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
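#VELPT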
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
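#VEL3D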
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
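#PRESF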
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
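#PHSEN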
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
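#PCO2W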
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
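#PARAD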
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
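#NUTNR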
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
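#FDCHP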
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
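#FLORT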
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
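#DOSTA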
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
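# Mooring DOSTA oxygen below is delivered through the co-located CTDBP recovered-instrument streams.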
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
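# Wave statistics on the inshore MFNs come from the ADCPT-M log9 stream rather than a dedicated WAVSS instrument.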
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
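# Cabled benthic experiment package (BEP) streamed data below.
# Example lookup resolved by this chain:
#   platform_name='CE04OSBP', node='BEP', instrument_class='CTD', method='Streamed'
#   -> 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'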
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
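# CSPP nitrate (NUTNR) is mapped from recovered datasets only in this section.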
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
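# CSPP OPTAA requests below carry only time and pressure; the absorption/attenuation spectra are not pulled here.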
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
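# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP) below.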
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
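# CE05MOAS glider (mobile asset) CTD data below; lat/lon are carried with each record.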
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
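# ---------------------------------------------------------------------------
# CE05MOAS glider DOSTA (dissolved-oxygen optode) streams. Each branch below
# maps a glider/method pair onto its uframe oxygen stream; oxygen is reported
# both per volume (sci_oxy4_oxygen, umol/L) and per mass (sci_abs_oxygen,
# umol/kg), with interpolated CTD pressure and position for registration.
#
# The CTD/DOSTA/FLORT/PARAD branches for these gliders differ only in glider
# id, sensor port, delivery path, and stream name, so the dataset path could
# in principle come from a small lookup table. A minimal sketch of that idea
# (hypothetical helper names, not part of this module's interface):
#
#   _GLIDER_STREAMS = {
#       ('CTD', 'Telemetered'): ('05-CTDGVM000', 'telemetered',
#                                'ctdgv_m_glider_instrument'),
#       ('CTD', 'RecoveredHost'): ('05-CTDGVM000', 'recovered_host',
#                                  'ctdgv_m_glider_instrument_recovered'),
#       ('DOSTA', 'Telemetered'): ('04-DOSTAM000', 'telemetered',
#                                  'dosta_abcdjm_glider_instrument'),
#       ('DOSTA', 'RecoveredHost'): ('04-DOSTAM000', 'recovered_host',
#                                    'dosta_abcdjm_glider_recovered'),
#   }
#
#   def _glider_dataset(glider, instrument_class, method):
#       # _glider_dataset('GL311', 'DOSTA', 'Telemetered') would return
#       # 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
#       port, path, stream = _GLIDER_STREAMS[(instrument_class, method)]
#       return '/'.join(['CE05MOAS', glider, port, path, stream])
#
# The explicit per-glider branches below remain the authoritative mapping.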
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
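# ---------------------------------------------------------------------------
# CE05MOAS glider FLORT (FLBBCD fluorometer / optical backscatter) streams:
# chlorophyll (ug/L), CDOM (ppb), backscatter and scattering coefficients
# (m-1 sr-1, m-1), with interpolated CTD pressure and position.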
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
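#CE05MOAS glider PARAD (Photosynthetically Available Radiation) datasets:
#parad_m_glider_instrument (telemetered) and parad_m_glider_recovered (recovered host)
#streams reporting PAR in umol photons m-2 s-1 with interpolated CTD pressure and position.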
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
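#CE05MOAS glider ADCP (Acoustic Doppler Current Profiler) datasets, available here
#only via the recovered-host adcp_velocity_glider stream: eastward/northward/upward
#seawater velocities per depth bin plus glider attitude (heading, pitch, roll).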
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
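#Surface-mooring METBK hourly products (metbk_hourly) for the CE02SHSM, CE07SHSM,
#CE04OSSM, and CE09OSSM buoys: derived bulk air-sea fluxes (buoyancy, heat, latent,
#sensible, momentum, rain, freshwater), net long/shortwave irradiance, 2 m specific
#humidity and air temperature, skin temperature, stability, and 10 m wind speed.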
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
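#Surface-mooring WAVSS mean directional wave spectra (wavss_a_dcl_mean_directional
#streams): per-band mean direction and directional spread, plus the
#declination-corrected mean and directional wave directions.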
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
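#WAVSS Non-Directional Wave Spectra Data Streams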
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
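#WAVSS Buoy Motion Data Streams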
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
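#WAVSS Fourier Coefficient Data Streams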
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
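#CE04OSPS/CE04OSPD Cabled Profiler Data Streams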
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
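#CE04OSPS PC01B 200m Platform Data Streams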
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
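#METBK2: second bulk meteorology package, mounted on SBD12 (METBK1 is on SBD11)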
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (Telemetered)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (RecoveredHost)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
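#METBK1-hr and METBK2-hr (hourly METBK flux products)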
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
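#CTD (NSIF = near-surface instrument frame; MFN = seafloor multi-function node)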
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
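#OPTAA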
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
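#VELPT
# Note: heading/roll/pitch are reported in deci-degrees and temperature in
# centi-degrees (0.01degC); any conversion to base units (e.g.
# heading_deg = heading_decidegree / 10.0) is assumed to happen downstream
# and is not performed in this script.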
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
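# --- FLORT (three-channel fluorometer) on the NSIF: seawater scattering, chlorophyll-a,
# CDOM, total volume scattering, and optical backscatter ---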
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
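# --- SPKIR (downwelling spectral irradiance) on the NSIF; a single data variable,
# the multispectral downwelling vector in uW cm-2 nm-1 ---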
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
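# --- DOSTA (dissolved oxygen optode) on the NSIF ---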
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
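# --- PHSEN (seawater pH) on the NSIF; judging by the stream names, the Telemetered and
# RecoveredHost branches read the DCL-logged stream, while RecoveredInst reads the
# instrument's own internal record ---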
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
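# --- PHSEN on the MFN (seafloor multi-function node): same variables as the NSIF
# branches above, but under the MFD35 designator instead of RID26 ---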
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
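# --- PCO2W (water-side pCO2) on the MFN ---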
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
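# --- PRESF (seafloor pressure / tide) on the MFN. The recovered_inst streams expose
# presf_tide_pressure/presf_tide_temperature, while the DCL-logged streams use
# abs_seafloor_pressure/seawater_temperature; CP04OSSM carries the -C series sensor
# (02-PRESFC000) where the other moorings carry -B (02-PRESFB000). ---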
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
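# --- VELPT on the MFN; CP04OSSM carries the -B series sensor (04-VELPTB000) where
# CP01CNSM and CP03ISSM carry -A (04-VELPTA000) ---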
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
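# --- DOSTA on the MFN (MFD37 designator) ---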
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
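# --- ZPLSC (bio-acoustic sonar) on the MFN; only the time coordinate is populated
# for these echogram streams ---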
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
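# --- ADCP (acoustic Doppler current profiler) earth-coordinate velocity streams on the
# MFN; CP04OSSM carries an ADCPSJ-series unit where CP01CNSM/CP03ISSM carry ADCPTF ---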
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
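#Each profiler branch below sets the uFrame dataset path, then fills
#var_list with variable names, empty np.array([]) placeholders, and units.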
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
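#CP01CNPM Wire-Following Profiler mooring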
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
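#CP02PMCI Wire-Following Profiler mooring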
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
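#CP02PMCO Wire-Following Profiler mooring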
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
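#CP02PMUI Wire-Following Profiler mooring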
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
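#CP02PMUO Wire-Following Profiler mooring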
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
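#CP03ISPM Wire-Following Profiler mooring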
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
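# Note: instrument_class 'DOSTA' on this profiler resolves to the DOFST-K
# stream, which carries the raw oxygen frequency in Hz alongside the derived
# L2 product in umol/kg.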
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
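# VEL3D-K: eastward/northward/upward point velocities with instrument
# attitude (heading/pitch/roll) and co-located CTD pressure.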
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
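# Riser ADCPs on the profiler moorings (CP04OSPM, CP01CNPM, CP02PMCI,
# CP02PMCO, CP02PMUI, CP02PMUO, CP03ISPM). Each platform exposes three
# methods: RecoveredInst serves adcp_velocity_earth with CF-style variable
# names and 'deci-degrees' attitude units, while RecoveredHost and
# Telemetered serve the adcps_jln_* variables with 'cdegree' attitude units.
# A minimal table-driven sketch (hypothetical, not used by this module) of
# how one such branch could be collapsed:
#     _ADCPS_JLN = [('time', 'seconds since 1900-01-01'),
#                   ('bin_depths', 'meters'),
#                   ('adcps_jln_heading', 'cdegree')]  # ...and so on
#     for i, (name, units) in enumerate(_ADCPS_JLN):
#         var_list[i].name, var_list[i].data, var_list[i].units = name, np.array([]), units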
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
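# CP01CNPM riser ADCP (ADCPTG010), same three methods as CP04OSPM above.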
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
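# CP02PMCI riser ADCP (ADCPTG010).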
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
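# CP02PMCO riser ADCP (ADCPTG010).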
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
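# CP02PMUI riser ADCP (ADCPTG010).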
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
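# CP02PMUO riser ADCP (ADCPSL010).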
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
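# CP03ISPM riser ADCP (ADCPTG010).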
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
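# CP05MOAS gliders. Each glider (CPGL336, CPGL388, CPGL335, CPGL339) carries
# the same five instrument classes: CTD, DOSTA, FLORT and PARAD (Telemetered
# and RecoveredHost), plus ADCP (shown here for the RecoveredHost method).
# Every branch follows the same pattern for stream variable i:
#     var_list[i].name  -> uFrame parameter name
#     var_list[i].data  -> empty np.array() placeholder, presumably filled
#                          once the uFrame request returns (assumption)
#     var_list[i].units -> engineering units string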
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
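# CPGL388 (glider GL388): same instrument branches as CPGL336.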
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
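# CPGL335 (glider GL335): same instrument branches as CPGL336.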
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
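# CPGL339 (glider GL339): same instrument branches as CPGL336.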
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
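# CP05MOAS glider GL340: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.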
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
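# CP05MOAS glider GL374: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.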
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
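# CP05MOAS glider GL375: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.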
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
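# CP05MOAS glider GL376: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.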
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
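# CP05MOAS glider GL379: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.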
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
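# CP05MOAS glider GL380: CTD, DOSTA, FLORT, and PARAD streams (telemetered and
# recovered_host), plus recovered_host ADCP velocity data.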
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
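# CP05MOAS glider GL387 data streams.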
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
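# ---------------------------------------------------------------------------
# Refactor sketch (illustrative, not part of the original script): the long
# if/elif chain above maps a (platform, node, instrument class, method) tuple
# to a uframe dataset path plus per-variable names and units, so it could be
# collapsed into a lookup table. _GLIDER_DATASETS and lookup_dataset below
# are hypothetical names; the sample entry copies the PARAD/Telemetered case.
_GLIDER_DATASETS = {
    ('CPGL387', 'GLIDER', 'PARAD', 'Telemetered'): (
        'CP05MOAS/GL387/05-PARADM000/telemetered/parad_m_glider_instrument',
        [('time', 'seconds since 1900-01-01'),
         ('parad_m_par', 'umol photons m-2 s-1'),
         ('int_ctd_pressure', 'dbar'),
         ('lat', 'degree_north'),
         ('lon', 'degree_east')]),
    # ... one entry per platform/instrument/method combination ...
}

def lookup_dataset(platform_name, node, instrument_class, method):
    """Return (uframe_dataset_name, [(var_name, units), ...]) for a combination."""
    return _GLIDER_DATASETS[(platform_name, node, instrument_class, method)]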
from numpy import sqrt,abs,zeros,log,exp,dot,log10,median,atleast_1d,var,shape,pi,where
from scipy.stats import norm
from scipy.linalg import solveh_banded,cholesky_banded
from scipy.special import gammaln,betainc,gammaincc
def lprob2sigma(lprob):
""" translates a log_e(probability) to units of Gaussian sigmas """
if (lprob>-36.):
sigma = norm.ppf(1.-0.5*exp(1.*lprob))
else:
sigma = sqrt( log(2./pi) - 2.*log(8.2) - 2.*lprob )
return float(sigma)
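# Quick illustrative check (not part of the original module): a two-sided
# log-probability of log(0.0455) corresponds to roughly a 2-sigma event.
assert abs(lprob2sigma(log(0.0455)) - 2.0) < 1e-2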
def chol_inverse_diag(t):
""" Computes inverse of matrix given its Cholesky upper Triangular decomposition t.
matrix form: ab[u + i - j, j] == a[i,j] (here u=1)
(quick version: only calculates diagonal and neighboring elements) """
(uu,nrows) = shape(t)
B = zeros((uu,nrows),dtype='float64')
# Crank-Nicholson (implicit) finite difference method for a wildfire model.
# Code written by <NAME>. Implicit finite difference method derived by <NAME>, <NAME>, <NAME>, and
# <NAME>. 2018-12-10
import numpy as np
from scipy import linalg as la
from matplotlib import pyplot as plt
from matplotlib import animation
from scipy.optimize import fsolve
def conditions(TS1, TS0, # vectors
K1, K2, h, k, A, B, C1, C2, # constants
hT_aj, cT_aj, dT_aj, hT_bj, cT_bj, dT_bj, hS_aj, cS_aj, dS_aj, hS_bj, cS_bj, dS_bj): # functions
"""Return the conditions for the wildfire model.
Returns nonlinear implicit Crank-Nicholson conditions for the wildfire PDE system, derived using center difference
approximations for u_x and midpoint approximation for u_xx. Boundary conditions were derived similarly.
With K1 = k / (2 * h ** 2) and K2 = k * V / (4 * h), the conditions are the following:
for T: [
h hT_aj = (h cT_aj - dT_aj) T1[0] + dT_aj T1[1] # left boundary
`-.
(T1[k] - T0[k]) =
K1 * (T1[k+1] - 2*T1[k] + T1[k-1] + T0[k+1] - 2*T0[k] + T0[k-1])
- K2 * (T1[k+1] - T1[k-1] + T0[k+1] - T0[k-1])
+ k * A * (S1[k] * exp(-B / T1[k]) - C1 * T1[k]) # interior
`-.
h hT_bj = (h cT_bj + dT_bj) T1[-1] - dT_bj T1[-2] # right boundary
], and
for S: [
h hS_aj = (h cS_aj - dS_aj) S1[0] + dS_aj S1[1] # left boundary
`-.
S1[k] - S0[k] = -k * C2 * S1[k] * exp(-B / T1[k]) # interior
`-.
h hS_bj = (h cS_bj + dS_bj) S1[-1] - dS_bj S1[-2] # right boundary
]
Parameters
TS1 (ndarray): The values of T^{n+1} and S^{n+1}
TS0 (ndarray): The values of T^n and S^n
K1 (float): first constant in the equations
K2 (float): second constant in the equations
h (float): spatial difference constant, usually (b - a) / num_x_steps
k (float): temporal difference constant, usually T / num_t_steps
A (float): constant from PDE system
B (float): constant from PDE system
C1 (float): constant from PDE system
C2 (float): constant from PDE system
hT_aj (float): hT_a evaluated at this time step
cT_aj (float): cT_a evaluated at this time step
dT_aj (float): dT_a evaluated at this time step
hT_bj (float): hT_b evaluated at this time step
cT_bj (float): cT_b evaluated at this time step
dT_bj (float): dT_b evaluated at this time step
hS_aj (float): hS_a evaluated at this time step
cS_aj (float): cS_a evaluated at this time step
dS_aj (float): dS_a evaluated at this time step
hS_bj (float): hS_b evaluated at this time step
cS_bj (float): cS_b evaluated at this time step
dS_bj (float): dS_b evaluated at this time step
Returns
(ndarray): The residuals (differences between right- and left-hand sides) of the conditions.
"""
T0, S0 = np.split(TS0, 2)
T1, S1 = np.split(TS1, 2)
# commonly used term, computed beforehand to save time
SeBT = S1[1:-1] * np.exp(-B / T1[1:-1])
# compute Crank-Nicolson conditions on interior for S
S_lhs = S1[1:-1] - S0[1:-1] # S1[k] - S0[k] = -k * C2 * S1[k] * exp(-B / T1[k])
S_rhs = -k * C2 * SeBT
# calculate boundary conditions for S
Sa_condition = (h * cS_aj - dS_aj) * S1[0] + dS_aj * S1[1]
Sb_condition = (h * cS_bj + dS_bj) * S1[-1] - dS_bj * S1[-2]
# compute Crank-Nicolson conditions on interior for T
T_lhs = T1[1:-1] - T0[1:-1]
K1_term = K1 * (T1[2:] - 2 * T1[1:-1] + T1[:-2] + T0[2:] - 2 * T0[1:-1] + T0[:-2])
K2_term = K2 * (T1[2:] - T1[:-2] + T0[2:] - T0[:-2])
T_rhs = K1_term - K2_term + k * A * (SeBT - C1 * T1[1:-1])
# calculate boundary conditions for T
Ta_condition = (h * cT_aj - dT_aj) * T1[0] + dT_aj * T1[1]
Tb_condition = (h * cT_bj + dT_bj) * T1[-1] - dT_bj * T1[-2]
# return the complete set of conditions for S and T
return np.concatenate((
[h * hS_aj - Sa_condition], # S boundary condition at a
S_lhs - S_rhs, # S interior conditions
[h * hS_bj - Sb_condition], # S boundary condition at b
[h * hT_aj - Ta_condition], # T boundary condition at a
T_lhs - T_rhs, # T interior conditions
[h * hT_bj - Tb_condition] # T boundary condition at b
))
def wildfire_model(a, b, T, N_x, N_t, # constants
T_0, S_0, cT_a, dT_a, hT_a, cT_b, dT_b, hT_b, cS_a, dS_a, hS_a, cS_b, dS_b, hS_b, # functions
A, B, C1, C2, v): # constants
"""Returns a solution to the wildfire PDE system.
Returns a Crank-Nicolson approximation of the solution T(x, t), S(x, t) for the following system:
T_t = T_xx - v * T_x + A(S * exp(-B / T) - C1 * T),
S_t = -C2 * S * exp(-B / T), a <= x <= b, 0 < t <= T
T(x, 0) = T_0(x),
S(x, 0) = S_0(x),
hT_a(t) = cT_a(t) * T(a, t) + dT_a(t) * T_x(a, t),
hT_b(t) = cT_b(t) * T(b, t) + dT_b(t) * T_x(b, t),
hS_a(t) = cS_a(t) * S(a, t) + dS_a(t) * S_x(a, t),
hS_b(t) = cS_b(t) * S(b, t) + dS_b(t) * S_x(b, t).
In the above equations, T corresponds to temperature, S to the amount of available fuel, and v to wind conditions;
A, B, C1, and C2 are constants.
Parameters:
a (float): left spatial endpoint
b (float): right spatial endpoint
T (float): final time value
N_x (int): number of mesh nodes in the spatial dimension
N_t (int): number of mesh nodes in the temporal dimension
T_0 (callable): function specifying the initial condition for T
S_0 (callable): function specifying the initial condition for S
cT_a (callable): function specifying left boundary condition for T
dT_a (callable): function specifying left boundary condition for T
hT_a (callable): function specifying left boundary condition for T
cT_b (callable): function specifying right boundary condition for T
dT_b (callable): function specifying right boundary condition for T
hT_b (callable): function specifying right boundary condition for T
cS_a (callable): function specifying left boundary condition for S
dS_a (callable): function specifying left boundary condition for S
hS_a (callable): function specifying left boundary condition for S
cS_b (callable): function specifying right boundary condition for S
dS_b (callable): function specifying right boundary condition for S
hS_b (callable): function specifying right boundary condition for S
A (float): constant from PDE system
B (float): constant from PDE system
C1 (float): constant from PDE system
C2 (float): constant from PDE system
v (float): constant from PDE system
Returns:
Ts (np.ndarray): finite difference approximation of T(x,t). Ts[j] = T(x,t_j), where j is the index corresponding
to time t_j.
Ss (np.ndarray): finite difference approximation of T(x,t). Ts[j] = T(x,t_j), where j is the index corresponding
to time t_j.
"""
if a >= b:
raise ValueError('a must be less than b')
if T <= 0:
raise ValueError('T must be greater than or equal to zero')
if N_x <= 2:
raise ValueError('N_x must be greater than zero')
if N_t <= 1:
raise ValueError('N_t must be greater than zero')
x, delx = np.linspace(a, b, N_x, retstep=True)
t, delt = np.linspace(0, T, N_t, retstep=True)
# evaluate the boundary condition functions along t
HT_a = hT_a(t)
CT_a = cT_a(t)
DT_a = dT_a(t)
HT_b = hT_b(t)
CT_b = cT_b(t)
DT_b = dT_b(t)
HS_a = hS_a(t)
CS_a = cS_a(t)
DS_a = dS_a(t)
HS_b = hS_b(t)
CS_b = cS_b(t)
DS_b = dS_b(t)
# evaluate the initial condition functions
S_x0 = S_0(x)
T_x0 = T_0(x)
K1 = delt / 2 / delx / delx
K2 = delt * v / 4 / delx
# combine the initial conditions for T and S into one vector
TSs = [np.concatenate((T_x0, S_x0))]
for j in range(1, N_t):
TSs.append(fsolve(conditions,
TSs[-1],
args=(TSs[-1],
K1, K2, delx, delt, A, B, C1, C2,
HT_a[j], CT_a[j], DT_a[j], HT_b[j], CT_b[j], DT_b[j],
HS_a[j], CS_a[j], DS_a[j], HS_b[j], CS_b[j], DS_b[j]
)
))
TSs = np.array(TSs)
Ts, Ss = np.split(np.array(TSs), 2, axis=1)
return Ts, Ss
def test_wildfire_model():
"""With initial conditions
T_0(x) = sech(x)
S_0(x) = tanh(x)
and boundary conditions specified by
cT_a(t) = 1, dT_a(t) = 0, hT_a(t) = T_0(a),
cT_b(t) = 1, dT_b(t) = 0, hT_b(t) = T_0(b),
cS_a(t) = 1, dS_a(t) = 0, hS_a(t) = S_0(a),
cS_b(t) = 1, dS_b(t) = 0, and hS_b(t) = S_0(b).
the solution looks like a fire centered at zero that moves into the supply found in the positive x direction. We
test using this fact. The correct result is displayed as an animation in test_wildfire.mp4.
"""
a = -10
b = 10
T = 1.0
N_x = 100
N_t = 100
T_0 = lambda x: 1 / np.cosh(x)
S_0 = lambda x: np.tanh(x)
cT_a = lambda t: np.ones_like(t) if type(t) == np.ndarray else 1
dT_a = lambda t: np.zeros_like(t) if type(t) == np.ndarray else 0
hT_a = lambda t: T_0(a) * np.ones_like(t) if type(t) == np.ndarray else T_0(a)
cT_b = lambda t: np.ones_like(t) if type(t) == np.ndarray else 1
dT_b = lambda t: np.zeros_like(t) if type(t) == np.ndarray else 0
hT_b = lambda t: T_0(b) * np.ones_like(t) if type(t) == np.ndarray else T_0(b)
cS_a = lambda t: np.ones_like(t) if type(t) == np.ndarray else 1
dS_a = lambda t: np.zeros_like(t) if type(t) == np.ndarray else 0
hS_a = lambda t: S_0(a) * np.ones_like(t) if type(t) == np.ndarray else S_0(a)
cS_b = lambda t: np.ones_like(t) if type(t) == np.ndarray else 1
dS_b = lambda t: np.zeros_like(t) if type(t) == np.ndarray else 0
hS_b = lambda t: S_0(b) * np.ones_like(t) if type(t) == np.ndarray else S_0(b)
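# Hypothetical continuation of the truncated test above (the constants A, B,
# C1, C2 and v are illustrative choices, not values from the original test):
# Ts, Ss = wildfire_model(a, b, T, N_x, N_t,
#                         T_0, S_0, cT_a, dT_a, hT_a, cT_b, dT_b, hT_b,
#                         cS_a, dS_a, hS_a, cS_b, dS_b, hS_b,
#                         A=1.0, B=0.2, C1=1.0, C2=1.0, v=1.0)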
import warnings
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from source.data_handlers import BaseDataset
from source.data_loading import load_VSN_data, load_HAR_dataset
pd.options.mode.chained_assignment = None
class BaseFLDataset:
def __init__(self, config):
self.config = config
self.fl_train_datasets = None
self.centralized_train_dataset = None
self.fl_test_datasets = None
self.dataset_names = None
self.input_dim = None
self.output_dim = None
self.feature_names = None
self.participant_normalizations = {}
self.class_weights = {}
self._initialize_participant_datasets(config)
if self.fl_test_datasets is None:
self.fl_test_datasets = [None for _ in range(len(self))]
warnings.warn("It is not recommended to not define the test-set")
if self.input_dim is None:
raise RuntimeError("The input dimension must be defined")
if self.output_dim is None:
raise RuntimeError("The output dimension must be defined")
if self.feature_names is None:
raise RuntimeError("The feature names must be defined")
def __getitem__(self, index):
return (
self.dataset_names[index],
self.fl_train_datasets[index],
self.fl_test_datasets[index],
)
def __len__(self):
"""
Number of federated datasets which can be distributed to participants
Returns: Integer
"""
return len(self.fl_train_datasets)
def _initialize_participant_datasets(self, config):
raise NotImplementedError
class TabularDataset(BaseFLDataset):
def __init__(self, config):
super(TabularDataset, self).__init__(config)
def _split_x_y(self, df: pd.DataFrame):
raise NotImplementedError
def _initialize_participant_datasets(self, config):
self.fl_train_datasets = []
self.fl_test_datasets = []
self.dataset_names = []
def _get_counts(self, preprocessed_df, split_column):
counts = preprocessed_df[split_column].value_counts().reset_index()
counts.columns = [split_column, "count"]
counts = counts.sort_values(by=["count", split_column], ascending=False)
counts = counts[counts["count"] >= 1.33 * self.config["test_min_samples"]]
return counts
def _fill_normalize_steps(
self, dataset, numerical_columns, categorical_columns, split_column
):
# fill missing values before normalization if enabled
if self.config["fill_missing_before_normalization"]:
dataset = dataset.fillna(float(self.config["missing_value_fill"]))
# normalize
if self.config["normalization_location"] == "global":
num = dataset[numerical_columns]
dataset[numerical_columns] = self._normalize(num)
elif self.config["normalization_location"] == "local":
for participant_name in list(dataset[split_column].unique()):
row_idx = dataset[split_column] == participant_name
num = dataset.loc[row_idx, numerical_columns]
dataset.loc[row_idx, numerical_columns] = self._normalize(num)
else:
pass
# compute the hot-start parameters for each client
for participant_name in list(dataset[split_column].unique()):
row_idx = dataset[split_column] == participant_name
num = dataset.loc[row_idx, numerical_columns]
self.participant_normalizations[participant_name] = {
"means": num.mean().fillna(0),
"stds": num.std().fillna(1),
}
# # fill remaining missing values
preprocessed_dataset = dataset
preprocessed_dataset[numerical_columns] = preprocessed_dataset[
numerical_columns
].fillna(self.config["missing_value_fill"])
preprocessed_dataset[categorical_columns] = preprocessed_dataset[
categorical_columns
].fillna(self.config["missing_value_fill_binary"])
if preprocessed_dataset.isna().any().any():
raise ValueError("Missing values must be filled correctly")
return preprocessed_dataset
def _normalize(self, df):
if self.config["normalization_mode"] == "standardization":
num_normalized = (df - df.mean()) / df.std()
else:
num_normalized = df - df.min()
num_normalized = num_normalized / num_normalized.max()
# num_normalized = (num_normalized - 0.5) * 2
if self.config["clip_standardized"]:
num_normalized = num_normalized.clip(-1.5, 1.5)
return num_normalized
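# Worked example (illustrative) of the two normalization modes above:
# for a column with values [0., 5., 10.],
#   "standardization" gives (x - mean) / std   -> [-1.0, 0.0, 1.0]
#   min-max scaling gives (x - min) / range    -> [0.0, 0.5, 1.0]
# and the optional clip then bounds standardized values to [-1.5, 1.5].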
def _create_tabular_federated_dataset(self, df, splits, split_column):
centralized_train_x = None
centralized_train_y = None
for i, x in splits.iterrows():
participant_name = x[split_column]
self.dataset_names.append(participant_name)
subdataset = df[df[split_column] == participant_name]
if len(subdataset) > self.config["max_samples"]:
subdataset = subdataset.sample(self.config["max_samples"])
warnings.warn(
"The number of available samples is reduced for participant "
+ str(participant_name)
)
x, y = self._split_x_y(subdataset)
self.input_dim = x.shape[1]
self.feature_names = x.columns
effective_test_size = int(
max(
self.config["test_split"] * len(y), self.config["test_min_samples"],
)
)
train_x, test_x, train_y, test_y = train_test_split(
x,
y,
test_size=effective_test_size,
stratify=y,
random_state=self.config["seed"],
)
# set the class_weights
self.class_weights[participant_name] = (
(1 / np.array(train_y.value_counts().sort_index()))
/ np.sum(1 / (np.array(train_y.value_counts().sort_index())))
* len(np.unique(train_y))
)
self.fl_train_datasets.append(
BaseDataset(train_x.to_numpy(), train_y.to_numpy())
)
self.fl_test_datasets.append(
BaseDataset(test_x.to_numpy(), test_y.to_numpy())
)
if centralized_train_x is None:
centralized_train_x = train_x
centralized_train_y = train_y
else:
centralized_train_x = pd.concat([centralized_train_x, train_x], axis=0)
centralized_train_y = pd.concat([centralized_train_y, train_y], axis=0)
self.class_weights["centralized"] = (
(1 / np.array(centralized_train_y.value_counts().sort_index()))
/ np.sum(1 / (np.array(centralized_train_y.value_counts().sort_index())))
* len(np.unique(centralized_train_y))
)
import os
import numpy as np
import tensorflow as tf
import systems.fluid_createTFRecords as fluid_createTFRecords
import systems.em_createTFRecords as em_createTFRecords
from glob import glob as glb
from tqdm import *
FLAGS = tf.app.flags.FLAGS
# Constants describing the input pipline.
tf.app.flags.DEFINE_integer('min_queue_examples', 400,
""" min examples to queue up""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 2,
""" number of process threads for que runner """)
tf.app.flags.DEFINE_string('data_dir', '/data',
""" base dir for all data""")
tf.app.flags.DEFINE_string('tf_data_dir', '../data',
""" base dir for saving tf records data""")
tf.app.flags.DEFINE_integer('tf_seq_length', 30,
""" seq length of tf saved records """)
def lat_distortions(lat, distortions):
if len(lat.get_shape()) == 5:
lat = tf.cond(distortions[0]>0.50, lambda: tf.reverse(lat, axis=[2]), lambda: lat)
elif len(lat.get_shape()) == 6:
lat = tf.cond(distortions[0]>0.50, lambda: tf.reverse(lat, axis=[2]), lambda: lat)
lat = tf.cond(0.75<distortions[0], lambda: tf.reverse(lat, axis=[3]), lambda: lat)
lat = tf.cond(distortions[0]<0.25, lambda: tf.reverse(lat, axis=[3]), lambda: lat)
return lat
def read_data_fluid(filename_queue, seq_length, shape, num_frames):
# make reader
reader = tf.TFRecordReader()
key, serialized_example = reader.read(filename_queue)
# make feature dict
feature_dict = {}
for i in range(FLAGS.tf_seq_length):
feature_dict['flow/frame_' + str(i)] = tf.FixedLenFeature([np.prod(np.array(shape))*num_frames], tf.float32)  # dtype and frame-count factor are assumptions; the source line is truncated here
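# Hypothetical continuation sketch (the original snippet ends mid-function);
# the parsed frames would typically be extracted and reshaped roughly as:
# features = tf.parse_single_example(serialized_example, features=feature_dict)
# flow = tf.reshape(features['flow/frame_0'], list(shape) + [num_frames])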
import os
import time
from os import path as osp
import numpy as np
import torch
import json
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from data_glob_speed import *
from transformations import *
from metric import compute_ate_rte
from model_resnet1d import *
_input_channel, _output_channel = 6, 2
_fc_config = {'fc_dim': 512, 'in_dim': 7, 'dropout': 0.5, 'trans_planes': 128}
def WriteList(path, name, folders):
with open(path+"/"+name, 'w') as f:
for folder in folders:
f.writelines(folder+"\n")
f.close()
def GetFolderName(path):
names = os.listdir(path+"/")
folders=[]
for name in names:
if os.path.isdir(os.path.join(os.path.abspath(path), name)):
folders.append(name)
folders.sort()
return folders
def get_model(arch):
if arch == 'resnet18':
network = ResNet1D(_input_channel, _output_channel, BasicBlock1D, [2, 2, 2, 2],
base_plane=64, output_block=FCOutputModule, kernel_size=3, **_fc_config)
elif arch == 'resnet50':
# For 1D network, the Bottleneck structure results in 2x more parameters, therefore we stick to BasicBlock.
_fc_config['fc_dim'] = 1024
network = ResNet1D(_input_channel, _output_channel, BasicBlock1D, [3, 4, 6, 3],
base_plane=64, output_block=FCOutputModule, kernel_size=3, **_fc_config)
elif arch == 'resnet101':
_fc_config['fc_dim'] = 1024
network = ResNet1D(_input_channel, _output_channel, BasicBlock1D, [3, 4, 23, 3],
base_plane=64, output_block=FCOutputModule, **_fc_config)
else:
raise ValueError('Invalid architecture: ' + arch)
return network
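# Usage sketch (illustrative, not part of the training script):
# net = get_model('resnet18')
# out = net(torch.randn(8, 6, 200))  # 8 windows of 200 samples, 6 IMU channels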
def run_test(network, data_loader, device, eval_mode=True):
targets_all = []
preds_all = []
if eval_mode:
network.eval()
for bid, (feat, targ, _, _) in tqdm(enumerate(data_loader)):
pred = network(feat.to(device)).cpu().detach().numpy()
targets_all.append(targ.detach().numpy())
preds_all.append(pred)
targets_all = np.concatenate(targets_all, axis=0)
preds_all = np.concatenate(preds_all, axis=0)
return targets_all, preds_all
def add_summary(writer, loss, step, mode):
names = '{0}_loss/loss_x,{0}_loss/loss_y,{0}_loss/loss_z,{0}_loss/loss_sin,{0}_loss/loss_cos'.format(
mode).split(',')
for i in range(loss.shape[0]):
writer.add_scalar(names[i], loss[i], step)
writer.add_scalar('{}_loss/avg'.format(mode), np.mean(loss), step)
def get_dataset(root_dir, data_list, args, **kwargs):
mode = kwargs.get('mode', 'train')
random_shift, shuffle, transforms, grv_only = 0, False, None, False
if mode == 'train':
random_shift = args.step_size // 2
shuffle = True
transforms = RandomHoriRotate(math.pi * 2)
elif mode == 'val':
shuffle = True
elif mode == 'test':
shuffle = False
grv_only = True
if args.dataset == 'ronin':
seq_type = GlobSpeedSequence
elif args.dataset == 'ridi':
from data_ridi import RIDIGlobSpeedSequence
seq_type = RIDIGlobSpeedSequence
elif args.dataset == 'sense':
seq_type = SenseINSSequence
dataset = StridedSequenceDataset(
seq_type, root_dir, data_list, args.cache_path, args.step_size, args.window_size,
random_shift=random_shift, transform=transforms,
shuffle=shuffle, grv_only=grv_only, max_ori_error=args.max_ori_error, args=args)
global _input_channel, _output_channel
_input_channel, _output_channel = dataset.feature_dim, dataset.target_dim
return dataset
def get_dataset_from_list(root_dir, list_path, args, **kwargs):
with open(list_path) as f:
data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
return get_dataset(root_dir, data_list, args, **kwargs)
def train(args, **kwargs):
# Loading data
start_t = time.time()
train_dataset = get_dataset_from_list(args.root_dir, args.train_list, args, mode='train')
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
end_t = time.time()
print('Training set loaded. Feature size: {}, target size: {}. Time usage: {:.3f}s'.format(
train_dataset.feature_dim, train_dataset.target_dim, end_t - start_t))
val_dataset, val_loader = None, None
if args.val_list is not None:
val_dataset = get_dataset_from_list(args.validation_dir, args.val_list, args, mode='val')
val_loader = DataLoader(val_dataset, batch_size=512, shuffle=True)
device = torch.device('cuda:0' if torch.cuda.is_available() and not args.cpu else 'cpu')
summary_writer = None
if args.out_dir is not None:
if not osp.isdir(args.out_dir):
os.makedirs(args.out_dir)
write_config(args)
if not osp.isdir(osp.join(args.out_dir, 'checkpoints')):
os.makedirs(osp.join(args.out_dir, 'checkpoints'))
if not osp.isdir(osp.join(args.out_dir, 'logs')):
os.makedirs(osp.join(args.out_dir, 'logs'))
global _fc_config
_fc_config['in_dim'] = args.window_size // 32 + 1
network = get_model(args.arch).to(device)
print('Number of train samples: {}'.format(len(train_dataset)))
if val_dataset:
print('Number of val samples: {}'.format(len(val_dataset)))
total_params = network.get_num_params()
print('Total number of parameters: ', total_params)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(network.parameters(), args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=10, verbose=True, eps=1e-12)
start_epoch = 0
if args.continue_from is not None and osp.exists(args.continue_from):
checkpoints = torch.load(args.continue_from)
start_epoch = checkpoints.get('epoch', 0)
network.load_state_dict(checkpoints.get('model_state_dict'))
optimizer.load_state_dict(checkpoints.get('optimizer_state_dict'))
if args.out_dir is not None and osp.exists(osp.join(args.out_dir, 'logs')):
summary_writer = SummaryWriter(osp.join(args.out_dir, 'logs'))
summary_writer.add_text('info', 'total_param: {}'.format(total_params))
step = 0
best_val_loss = np.inf
print('Start from epoch {}'.format(start_epoch))
total_epoch = start_epoch
train_losses_all, val_losses_all = [], []
# Get the initial loss.
init_train_targ, init_train_pred = run_test(network, train_loader, device, eval_mode=False)
init_train_loss = np.mean((init_train_targ - init_train_pred) ** 2, axis=0)
train_losses_all.append(np.mean(init_train_loss))
print('-------------------------')
print('Init: average loss: {}/{:.6f}'.format(init_train_loss, train_losses_all[-1]))
if summary_writer is not None:
add_summary(summary_writer, init_train_loss, 0, 'train')
if val_loader is not None:
init_val_targ, init_val_pred = run_test(network, val_loader, device)
init_val_loss = np.mean((init_val_targ - init_val_pred) ** 2, axis=0)
val_losses_all.append(np.mean(init_val_loss))
print('Validation loss: {}/{:.6f}'.format(init_val_loss, val_losses_all[-1]))
if summary_writer is not None:
add_summary(summary_writer, init_val_loss, 0, 'val')
try:
for epoch in range(start_epoch, args.epochs):
print('-------------------------')
start_t = time.time()
network.train()
train_outs, train_targets = [], []
for batch_id, (feat, targ, _, _) in tqdm(enumerate(train_loader)):
feat, targ = feat.to(device), targ.to(device)
optimizer.zero_grad()
pred = network(feat)
train_outs.append(pred.cpu().detach().numpy())
train_targets.append(targ.cpu().detach().numpy())
loss = criterion(pred, targ)
loss = torch.mean(loss)
loss.backward()
optimizer.step()
step += 1
train_outs = np.concatenate(train_outs, axis=0)
train_targets = np.concatenate(train_targets, axis=0)
train_losses = np.average((train_outs - train_targets) ** 2, axis=0)
end_t = time.time()
print('Epoch {}, time usage: {:.3f}s, average loss: {}/{:.6f}'.format(
epoch, end_t - start_t, train_losses, np.average(train_losses)))
print('Learning rate: {}'.format(optimizer.param_groups[0]['lr']))
train_losses_all.append(np.average(train_losses))
if summary_writer is not None:
add_summary(summary_writer, train_losses, epoch + 1, 'train')
summary_writer.add_scalar('optimizer/lr', optimizer.param_groups[0]['lr'], epoch)
if val_loader is not None:
network.eval()
val_outs, val_targets = run_test(network, val_loader, device)
val_losses = np.average((val_outs - val_targets) ** 2, axis=0)
avg_loss = np.average(val_losses)
print('Validation loss: {}/{:.6f}'.format(val_losses, avg_loss))
scheduler.step(avg_loss)
if summary_writer is not None:
add_summary(summary_writer, val_losses, epoch + 1, 'val')
val_losses_all.append(avg_loss)
if avg_loss < best_val_loss:
best_val_loss = avg_loss
if args.out_dir and osp.isdir(args.out_dir):
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Model saved to ', model_path)
else:
if args.out_dir is not None and osp.isdir(args.out_dir):
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_%d.pt' % epoch)
torch.save({'model_state_dict': network.state_dict(),
'epoch': epoch,
'optimizer_state_dict': optimizer.state_dict()}, model_path)
print('Model saved to ', model_path)
total_epoch = epoch
except KeyboardInterrupt:
print('-' * 60)
print('Early terminate')
print('Training complete')
if args.out_dir:
model_path = osp.join(args.out_dir, 'checkpoints', 'checkpoint_latest.pt')
torch.save({'model_state_dict': network.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': total_epoch}, model_path)
print('Checkpoint saved to ', model_path)
return train_losses_all, val_losses_all
def recon_traj_with_preds(dataset, preds, seq_id=0, **kwargs):
"""
Reconstruct trajectory with predicted global velocities.
"""
ts = dataset.ts[seq_id]
ind = np.array([i[1] for i in dataset.index_map if i[0] == seq_id], dtype=int)
dts = np.mean(ts[ind[1:]] - ts[ind[:-1]])
# pos = np.zeros([preds.shape[0] + 2, 2])
# pos[0] = dataset.gt_pos[seq_id][0, :2]
pos = np.zeros([preds.shape[0] + 2, 3])
pos[0] = dataset.gt_pos[seq_id][0, :3]
# pos[1:-1] = np.cumsum(preds[:, :2] * dts, axis=0) + pos[0]
pos[1:-1] = np.cumsum(preds[:, :3] * dts, axis=0) + pos[0]
pos[-1] = pos[-2]
ts_ext = np.concatenate([[ts[0] - 1e-06], ts[ind], [ts[-1] + 1e-06]], axis=0)
pos = interp1d(ts_ext, pos, axis=0)(ts)
return pos
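# Illustrative sanity check for the integration above: with a constant
# predicted velocity of [1, 0, 0] m/s and dts = 0.005 s (200 Hz predictions),
# each successive position sample advances 5 mm along x before interpolation.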
def test_sequence(args):
if args.test_path is not None:
if args.test_path[-1] == '/':
args.test_path = args.test_path[:-1]
root_dir = osp.split(args.test_path)[0]
test_data_list = [osp.split(args.test_path)[1]]
elif args.test_list is not None:
root_dir = args.root_dir
with open(args.test_list) as f:
test_data_list = [s.strip().split(',')[0] for s in f.readlines() if len(s) > 0 and s[0] != '#']
else:
raise ValueError('Either test_path or test_list must be specified.')
if args.out_dir is not None and not osp.isdir(args.out_dir):
os.makedirs(args.out_dir)
if not torch.cuda.is_available() or args.cpu:
device = torch.device('cpu')
checkpoint = torch.load(args.model_path, map_location=lambda storage, location: storage)
else:
device = torch.device('cuda:0')
checkpoint = torch.load(args.model_path)
# Load the first sequence to update the input and output size
_ = get_dataset(root_dir, [test_data_list[0]], args)
global _fc_config
_fc_config['in_dim'] = args.window_size // 32 + 1
network = get_model(args.arch)
network.load_state_dict(checkpoint['model_state_dict'])
network.eval().to(device)
print('Model {} loaded to device {}.'.format(args.model_path, device))
preds_seq, targets_seq, losses_seq, ate_all, t_rte_all, d_rte_all = [], [], [], [], [], []
traj_lens = []
pred_per_min = 200 * 60 # hard code here
for data in test_data_list:
seq_dataset = get_dataset(root_dir, [data], args, mode='test')
seq_loader = DataLoader(seq_dataset, batch_size=1024, shuffle=False)
ind = np.array([i[1] for i in seq_dataset.index_map if i[0] == 0], dtype=int)
targets, preds = run_test(network, seq_loader, device, True)
losses = np.mean((targets - preds) ** 2, axis=0)
preds_seq.append(preds)
targets_seq.append(targets)
losses_seq.append(losses)
# pos_pred = recon_traj_with_preds(seq_dataset, preds)[:, :2]
# pos_gt = seq_dataset.gt_pos[0][:, :2]
pos_pred = recon_traj_with_preds(seq_dataset, preds)[:, :3]
pos_gt = seq_dataset.gt_pos[0][:, :3]
traj_lens.append(np.sum(np.linalg.norm(pos_gt[1:] - pos_gt[:-1], axis=1)))
ate, t_rte, d_rte = compute_ate_rte(pos_pred, pos_gt, pred_per_min)
ate_all.append(ate)
t_rte_all.append(t_rte)
d_rte_all.append(d_rte)
pos_cum_error = np.linalg.norm(pos_pred - pos_gt, axis=1)
print('Sequence {}, loss {} / {}, ate {:.6f}, t_rte {:.6f}, d_rte {:.6f}'.format(data, losses, np.mean(losses), ate, t_rte, d_rte))
# Plot figures
kp = preds.shape[1]
if kp == 2:
targ_names = ['vx', 'vy']
elif kp == 3:
targ_names = ['vx', 'vy', 'vz']
plt.figure('{}'.format(data), figsize=(16, 9))
plt.subplot2grid((kp, 2), (0, 0), rowspan=kp - 1)
plt.plot(pos_pred[:, 0], pos_pred[:, 1])
plt.plot(pos_gt[:, 0], pos_gt[:, 1])
# plt.plot(pos_pred[:, 0], pos_pred[:, 1], pos_pred[:, 2])
# plt.plot(pos_gt[:, 0], pos_gt[:, 1], pos_pred[:, 2])
plt.title(data)
plt.axis('equal')
plt.legend(['Predicted', 'Ground truth'])
plt.subplot2grid((kp, 2), (kp - 1, 0))
plt.plot(pos_cum_error)
plt.legend(['ATE:{:.3f}, T_RTE:{:.3f}, D_RTE:{:.3f}'.format(ate_all[-1], t_rte_all[-1], d_rte_all[-1])])
for i in range(kp):
plt.subplot2grid((kp, 2), (i, 1))
plt.plot(ind, preds[:, i])
plt.plot(ind, targets[:, i])
plt.legend(['Predicted', 'Ground truth'])
plt.title('{}, error: {:.6f}'.format(targ_names[i], losses[i]))
plt.tight_layout()
if args.show_plot:
plt.show()
if args.out_dir is not None and osp.isdir(args.out_dir):
# np.save(osp.join(args.out_dir, data + '_gsn.npy'),
# np.concatenate([pos_pred[:, :3], pos_gt[:, :2]], axis=1))
np.save(osp.join(args.out_dir, data + '_gsn.npy'),
np.concatenate([pos_pred[:, :3], pos_gt[:, :3]], axis=1))
plt.savefig(osp.join(args.out_dir, data + '_gsn.png'))
plt.close('all')
losses_seq = np.stack(losses_seq, axis=0)
losses_avg = np.mean(losses_seq, axis=1)
# Export a csv file
if args.out_dir is not None and osp.isdir(args.out_dir):
with open(osp.join(args.out_dir, 'losses.csv'), 'w') as f:
if losses_seq.shape[1] == 2:
f.write('seq,vx,vy,avg,ate,t_rte,d_rte\n')
else:
f.write('seq,vx,vy,vz,avg,ate,t_rte,d_rte\n')
for i in range(losses_seq.shape[0]):
f.write('{},'.format(test_data_list[i]))
for j in range(losses_seq.shape[1]):
f.write('{:.6f},'.format(losses_seq[i][j]))
f.write('{:.6f},{:6f},{:.6f},{:.6f}\n'.format(losses_avg[i], ate_all[i], t_rte_all[i], d_rte_all[i]))
print('----------\nOverall loss: {}/{}, avg ATE:{}, avg T_RTE:{}, avg D_RTE:{}'.format(
np.average(losses_seq, axis=0), np.average(losses_avg), np.mean(ate_all), np.mean(t_rte_all), np.mean(d_rte_all)))
import os
import sys
sys.path.append("..")
from utils import Logger
from algorithms import *
from scipy.sparse import csr_matrix
import numpy as np
class Bayes:
def predict(self,X,label,results,param=None):
Naive_param_np ,prior_p,prior_n =results["param"]
# print(prior ,"prior ")
n=np.arange(X.shape[1])
nn=np.repeat(n,X.shape[0],0)
X_1=X.astype(int)
# print(Naive_param_np[0:4,0:10],"Naive_param_np")
# print(X_1[0:10,0:10],"X_1")
pos=Naive_param_np[X_1,n]
neg=Naive_param_np[X_1+2,n]
# print(pos[0:10,0:10],"pos")
pos_prob=np.sum(np.log(pos),1)+np.sum(np.log(prior_p))
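# Hypothetical completion of the truncated predict() above, mirroring the
# positive-class computation (prior_n is assumed to hold the negative-class prior):
# neg_prob = np.sum(np.log(neg), 1) + np.sum(np.log(prior_n))
# return np.where(pos_prob > neg_prob, 1, 0)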
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from scipy.optimize import fmin_slsqp
from toolz import partial
from sklearn.model_selection import KFold, TimeSeriesSplit, RepeatedKFold
from sklearn.linear_model import ElasticNetCV, LassoCV, RidgeCV
from bayes_opt import BayesianOptimization
class Optimize(object):
####
# Synthetic Difference in Differences (SDID)
####
def est_zeta(self, Y_pre_c) -> float:
"""
# SDID
Parameter to adjust the L2 penalty term
"""
return (self.n_treat * self.n_post_term) ** (1 / 4) * np.std(
Y_pre_c.diff().dropna().values
)
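# Illustrative magnitude check (not in the original code): with n_treat = 1,
# n_post_term = 16 and a first-difference std of 0.5,
# zeta = (1 * 16) ** 0.25 * 0.5 = 1.0.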
def est_omega(self, Y_pre_c, Y_pre_t, zeta):
"""
# SDID
estimating omega
"""
Y_pre_t = Y_pre_t.copy()
n_features = Y_pre_c.shape[1]
nrow = Y_pre_c.shape[0]
_w = np.repeat(1 / n_features, n_features)
_w0 = 1
start_w = np.append(_w, _w0)
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Required to have non negative values
max_bnd = abs(Y_pre_t.mean()) * 2
w_bnds = tuple(
(0, 1) if i < n_features else (max_bnd * -1, max_bnd)
for i in range(n_features + 1)
)
caled_w = fmin_slsqp(
partial(self.l2_loss, X=Y_pre_c, y=Y_pre_t, zeta=zeta, nrow=nrow),
start_w,
f_eqcons=lambda x: np.sum(x[:n_features]) - 1,
bounds=w_bnds,
disp=False,
)
return caled_w
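# Usage sketch (illustrative): Y_pre_c is a (pre-periods x control units)
# DataFrame and Y_pre_t the treated unit's pre-period outcomes.
# zeta = self.est_zeta(Y_pre_c)
# w = self.est_omega(Y_pre_c, Y_pre_t, zeta)  # w[:-1] unit weights, w[-1] intercept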
def est_lambda(self, Y_pre_c, Y_post_c):
"""
# SDID
estimating lambda
"""
Y_pre_c_T = Y_pre_c.T
Y_post_c_T = Y_post_c.T
n_pre_term = Y_pre_c_T.shape[1]
_lambda = np.repeat(1 / n_pre_term, n_pre_term)
_lambda0 = 1
start_lambda = np.append(_lambda, _lambda0)
if type(Y_post_c_T) == pd.core.frame.DataFrame:
Y_post_c_T = Y_post_c_T.mean(axis=1)
max_bnd = abs(Y_post_c_T.mean()) * 2
lambda_bnds = tuple(
(0, 1) if i < n_pre_term else (max_bnd * -1, max_bnd)
for i in range(n_pre_term + 1)
)
caled_lambda = fmin_slsqp(
partial(self.l2_loss, X=Y_pre_c_T, y=Y_post_c_T, zeta=0, nrow=0),
start_lambda,
f_eqcons=lambda x: np.sum(x[:n_pre_term]) - 1,
bounds=lambda_bnds,
disp=False,
)
return caled_lambda[:n_pre_term]
def l2_loss(self, W, X, y, zeta, nrow) -> float:
"""
Loss function with L2 penalty
"""
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_X = X.copy()
_X["intersept"] = 1
return np.sum((y - _X.dot(W)) ** 2) + nrow * zeta ** 2 * np.sum(W[:-1] ** 2)
####
# Synthetic Control Method (SC)
####
def rmse_loss(self, W, X, y, intersept=True) -> float:
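# Note: as implemented this returns the mean of element-wise absolute
# residuals (mean of sqrt of squared deviations), i.e. a mean absolute
# deviation rather than a true root-mean-square error.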
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_X = X.copy()
if intersept:
_X["intersept"] = 1
return np.mean(np.sqrt((y - _X.dot(W)) ** 2))
def rmse_loss_with_V(self, W, V, X, y) -> float:
if type(y) == pd.core.frame.DataFrame:
y = y.mean(axis=1)
_rss = (y - X.dot(W)) ** 2
_n = len(y)
_importance = np.zeros((_n, _n))
np.fill_diagonal(_importance, V)
return np.sum(_importance @ _rss)
def _v_loss(self, V, X, y, return_loss=True):
Y_pre_t = self.Y_pre_t.copy()
n_features = self.Y_pre_c.shape[1]
_w = np.repeat(1 / n_features, n_features)
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
w_bnds = tuple((0, 1) for i in range(n_features))
_caled_w = fmin_slsqp(
partial(self.rmse_loss_with_V, V=V, X=X, y=y),
_w,
f_eqcons=lambda x: np.sum(x) - 1,
bounds=w_bnds,
disp=False,
)
if return_loss:
return self.rmse_loss(_caled_w, self.Y_pre_c, Y_pre_t, intersept=False)
else:
return _caled_w
def estimate_v(self, additional_X, additional_y):
_len = len(additional_X)
_v = np.repeat(1 / _len, _len)
caled_v = fmin_slsqp(
partial(self._v_loss, X=additional_X, y=additional_y),
_v,
f_eqcons=lambda x: np.sum(x) - 1,
bounds=tuple((0, 1) for i in range(_len)),
disp=False,
)
return caled_v
def est_omega_ADH(
self, Y_pre_c, Y_pre_t, additional_X=pd.DataFrame(), additional_y=pd.DataFrame()
):
"""
# SC
estimating omega for synthetic control method (not for synthetic diff.-in-diff.)
"""
Y_pre_t = Y_pre_t.copy()
n_features = Y_pre_c.shape[1]
nrow = Y_pre_c.shape[0]
_w = np.repeat(1 / n_features, n_features)
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Required to have non negative values
w_bnds = tuple((0, 1) for i in range(n_features))
if len(additional_X) == 0:
caled_w = fmin_slsqp(
partial(self.rmse_loss, X=Y_pre_c, y=Y_pre_t, intersept=False),
_w,
f_eqcons=lambda x: np.sum(x) - 1,
bounds=w_bnds,
disp=False,
)
return caled_w
else:
assert additional_X.shape[1] == Y_pre_c.shape[1]
if type(additional_y) == pd.core.frame.DataFrame:
additional_y = additional_y.mean(axis=1)
# normalized
temp_df = pd.concat([additional_X, additional_y], axis=1)
ss = StandardScaler()
ss_df = pd.DataFrame(
ss.fit_transform(temp_df), columns=temp_df.columns, index=temp_df.index
)
ss_X = ss_df.iloc[:, :-1]
ss_y = ss_df.iloc[:, -1]
add_X = pd.concat([Y_pre_c, ss_X])
add_y = pd.concat([Y_pre_t, ss_y])
self.caled_v = self.estimate_v(additional_X=add_X, additional_y=add_y)
return self._v_loss(self.caled_v, X=add_X, y=add_y, return_loss=False)
#####
# cv search for zeta
####
def _zeta_given_cv_loss_inverse(self, zeta, cv=5, split_type="KFold"):
return -1 * self._zeta_given_cv_loss(zeta, cv, split_type)[0]
def _zeta_given_cv_loss(self, zeta, cv=5, split_type="KFold"):
nrow = self.Y_pre_c.shape[0]
if split_type == "KFold":
kf = KFold(n_splits=cv, shuffle=True, random_state=self.random_seed)
elif split_type == "TimeSeriesSplit":
kf = TimeSeriesSplit(n_splits=cv)
elif split_type == "RepeatedKFold":
_cv = max(2, int(cv / 2))
kf = RepeatedKFold(
n_splits=_cv, n_repeats=_cv, random_state=self.random_seed
)
loss_result = []
nf_result = []
for train_index, test_index in kf.split(self.Y_pre_c, self.Y_pre_t):
train_w = self.est_omega(
self.Y_pre_c.iloc[train_index], self.Y_pre_t.iloc[train_index], zeta
)
nf_result.append(np.sum(np.round(np.abs(train_w), 3) > 0) - 1)
loss_result.append(
self.rmse_loss(
train_w,
self.Y_pre_c.iloc[test_index],
self.Y_pre_t.iloc[test_index],
)
)
return np.mean(loss_result), np.mean(nf_result)
def grid_search_zeta(
self, cv=5, n_candidate=20, candidate_zata=[], split_type="KFold"
):
"""
Search for zeta using grid search instead of theoretical values
"""
if len(candidate_zata) == 0:
for _z in np.linspace(0.1, self.base_zeta * 2, n_candidate):
candidate_zata.append(_z)
candidate_zata.append(self.base_zeta)
candidate_zata.append(0)
candidate_zata = sorted(candidate_zata)
result_loss_dict = {}
result_nf_dict = {}
print("cv: zeta")
for _zeta in tqdm(candidate_zata):
result_loss_dict[_zeta], result_nf_dict[_zeta] = self._zeta_given_cv_loss(
_zeta, cv=cv, split_type=split_type
)
loss_sorted = sorted(result_loss_dict.items(), key=lambda x: x[1])
return loss_sorted[0]
def bayes_opt_zeta(
self,
cv=5,
init_points=5,
n_iter=5,
zeta_max=None,
zeta_min=None,
split_type="KFold",
):
"""
Search for zeta using Bayesian Optimization instead of theoretical values
"""
if zeta_max == None:
zeta_max = self.base_zeta * 1.02
zeta_max2 = self.base_zeta * 2
if zeta_min == None:
zeta_min = self.base_zeta * 0.98
zeta_min2 = 0.01
pbounds = {"zeta": (zeta_min, zeta_max)}
optimizer = BayesianOptimization(
f=partial(self._zeta_given_cv_loss_inverse, cv=cv, split_type=split_type),
pbounds=pbounds,
random_state=self.random_seed,
)
optimizer.maximize(
init_points=2,
n_iter=2,
)
optimizer.set_bounds(new_bounds={"zeta": (zeta_min2, zeta_max2)})
optimizer.maximize(
init_points=init_points,
n_iter=n_iter,
)
optimizer.max["params"]["zeta"]
return (optimizer.max["params"]["zeta"], optimizer.max["target"] * -1)
#####
# The following is for sparse estimation
####
def est_omega_ElasticNet(self, Y_pre_c, Y_pre_t):
Y_pre_t = Y_pre_t.copy()
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
# Y_pre_t.columns = "treatment_group"
regr = ElasticNetCV(cv=5, random_state=0)
regr.fit(Y_pre_c, Y_pre_t)
self.elastic_net_alpha = regr.alpha_
caled_w = regr.coef_
return np.append(caled_w, regr.intercept_)
def est_omega_Lasso(self, Y_pre_c, Y_pre_t):
Y_pre_t = Y_pre_t.copy()
if type(Y_pre_t) == pd.core.frame.DataFrame:
Y_pre_t = Y_pre_t.mean(axis=1)
regr = LassoCV(cv=5, random_state=0)
regr.fit(Y_pre_c, Y_pre_t)
self.lasso_alpha = regr.alpha_
caled_w = regr.coef_
return np.append(caled_w, regr.intercept_)
"""
Module: LMR_verify_proxy_plot.py
Purpose: Plotting of summary statistics from proxy-based verification. Both proxy
chronologies that were assimilated to create reconstructions and those witheld for
independent verification are considered.
Input: Reads .pckl files containing verification data generated by running the
LMR_verify_proxy.py script.
Originator: <NAME> | Dept. of Atmospheric Sciences, Univ. of Washington
| October 2015
Revisions:
- Histogram plots now include overlays of distributions derived from kernel
(gaussian) density estimation, as well as results from a 2-sided
Kolmogorov-Smirnov significance test on distributions representing results
over two different time periods [<NAME> - U. of Washington, Dec. 2017]
- Added the production of plots showing results per individual proxy records.
[<NAME> - U. of Washington, March 2018]
"""
import os
import numpy as np
import pickle
from time import time
from os.path import join
from scipy import stats
import statsmodels.api as sm
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.colors import from_levels_and_colors
from mpl_toolkits.basemap import Basemap
# =========================================================================================
# START: set user parameters here
# =========================================================================================
# ------------------------------
# Section 1: Plotting parameters
# ------------------------------
make_plots_hist = True
make_plots_maps = False
make_plots_individual_sites = False
make_pdfs = False
# set the default size of the figure in inches. ['figure.figsize'] = width, height;
plt.rcParams['figure.figsize'] = 10, 8 # that's default image size for this interactive session
plt.rcParams['axes.linewidth'] = 2.0 #set the value globally
plt.rcParams['font.weight'] = 'bold' #set the font weight globally
plt.rcParams['font.size'] = 11 #set the font size globally
#plt.rc('text', usetex=True)
plt.rc('text', usetex=False)
plt.style.use('ggplot')
# Histogram plotting parameters
binwidth = 0.05
CORRrange = [-1,1]
CErange = [-1,1]
CEchangerange = [-1,1]
alpha = 0.25
fcolor = ['blue', 'red']
# -------------------------
# Section 2: Proxy datasets
# -------------------------
#proxies = 'PAGES2kv1'
proxies = 'LMRdb'
# Assign symbol to proxy types for plotting: dependent on proxy database used.
if proxies == 'PAGES2kv1':
# PAGES2kv1 proxies
proxy_verif = {\
'Tree ring_Width' :'o',\
'Tree ring_Density' :'s',\
'Ice core_d18O' :'v',\
'Ice core_d2H' :'^',\
'Ice core_Accumulation' :'D',\
'Coral_d18O' :'p',\
'Coral_Luminescence' :'8',\
'Lake sediment_All' :'<',\
'Marine sediment_All' :'>',\
'Speleothem_All' :'h',\
}
elif proxies == 'LMRdb':
# LMRdb proxies
proxy_verif = {\
'Tree Rings_WoodDensity' :'s',\
'Tree Rings_WidthPages' :'o',\
'Tree Rings_WidthPages2' :'o',\
'Tree Rings_WidthBreit' :'o',\
'Tree Rings_Isotopes' :'*',\
'Corals and Sclerosponges_d18O' :'p',\
'Corals and Sclerosponges_SrCa' :'h',\
'Corals and Sclerosponges_Rates':'D',\
'Ice Cores_d18O' :'v',\
'Ice Cores_dD' :'^',\
'Ice Cores_Accumulation' :'D',\
'Ice Cores_MeltFeature' :'d',\
'Lake Cores_Varve' :'<',\
'Lake Cores_Misc' :'>',\
'Lake Cores_BioMarkers' :'>',\
'Lake Cores_GeoChem' :'^',\
'Marine Cores_d18O' :'H',\
'Bivalve_d18O' :'8',\
'Speleothems_d18O' :'h',\
}
else:
raise SystemExit('ERROR in the specification of the proxy dataset to be considered. Exiting!')
# Only keep proxy sites for which the linear PSM has a correlation >= than this value
r_crit = 0.0
#r_crit = 0.2
# ------------------------------------
# Section 3: Directories & experiments
# ------------------------------------
#datadir_input = '/home/disk/ekman4/rtardif/LMR/output'
datadir_input = '/home/disk/kalman3/rtardif/LMR/output'
#datadir_input = '/home/disk/kalman3/rtardif/LMR/output/verification_production_runs'
#nexp = 'production_gis_ccsm4_pagesall_0.75'
#nexp = 'production_mlost_ccsm4_pagesall_0.75'
nexp = 'test_py3'
#verif_period = [[1880,2000],[0,1879]]
verif_period = [[1900,2000],[1800,1899]]
# Output directory, where the figs will be dumped.
#datadir_output = datadir_input # if want to keep things tidy
datadir_output = '.' # if want local plots
# =========================================================================================
# END: set user parameters here
# =========================================================================================
# =============================================================================
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<< Main code >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# =============================================================================
def main():
begin_time = time()
# =============================
# Loading the verification data
# =============================
vtype = {'assim': 'Assimilated proxies', 'verif':'Non-assimilated proxies'}
nbperiods = len(verif_period)
assim_dict = [dict() for x in range(nbperiods)]
verif_dict = [dict() for x in range(nbperiods)]
# loop over verification periods & load data in dictionaries
for p in range(nbperiods):
# Read the pickle files containing summary stats
fname_assim = datadir_input+'/'+nexp+'/'+'verifProxy_'+str(verif_period[p][0])+'to'+str(verif_period[p][1])+\
'/reconstruction_eval_assimilated_proxy_summary.pckl'
fname_verif = datadir_input+'/'+nexp+'/'+'verifProxy_'+str(verif_period[p][0])+'to'+str(verif_period[p][1])+\
'/reconstruction_eval_withheld_proxy_summary.pckl'
infile_assim = open(fname_assim,'rb')
assim_dict[p] = pickle.load(infile_assim)
infile_assim.close()
if os.path.isfile(fname_verif):
infile_verif = open(fname_verif,'rb')
verif_dict[p] = pickle.load(infile_verif)
infile_verif.close()
verif_data = True
else:
verif_data = False
# get list of all proxy types in the assimilated/withheld data
lst = []
for p in range(nbperiods):
a_sites = list(assim_dict[p].keys())
lst = lst + list(set([item[0] for item in a_sites]))
if verif_data:
v_sites = list(verif_dict[p].keys())
lst = lst + list(set([item[0] for item in v_sites]))
master_proxy_types = list(set([item for item in lst]))
master_proxy_types.insert(0,'All')
# ==================
# Now creating plots
# ==================
if datadir_output != '.':
figdir = datadir_output+'/VerifFigs'
if not os.path.isdir(figdir):
os.system('mkdir %s' % figdir)
else:
figdir = '.'
# ============================================================================================================
# 1) Histograms of (recon, proxy) CORRELATION, CE across grand ensemble for all proxy types and per proxy type
# ============================================================================================================
if make_plots_hist:
# loop over proxy types
for proxy in master_proxy_types:
print('Proxies: %s' %proxy)
fig = plt.figure(figsize=(12,8))
irow = 1
for v in list(vtype.keys()): # "assim" & "verif" proxies
if v == 'verif' and not verif_data:
break
ax_master = fig.add_subplot(2,1,irow)
# Turn off axis lines and ticks of the big subplot
ax_master.tick_params(labelcolor=(1.,1.,1., 0.0), top='off', bottom='off', left='off', right='off')
# Removes the white frame
ax_master._frameon = False
ax_master.set_title("%s\n" % vtype[v], fontsize=16, fontweight='bold')
facecolor = fcolor[0]
if v == 'assim':
pos = [1,2,3]
else:
pos = [4,5,6]
bins_corr = np.arange(-1.-binwidth/2, 1.+binwidth/2, binwidth)
bins_ce = np.arange(-2.-binwidth/2, 1.+binwidth/2, binwidth)
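# With binwidth = 0.05 these edges are offset by half a bin so that bin
# centers fall exactly on -1.00, -0.95, ..., 1.00 (correlation) and on
# -2.00, ..., 1.00 (CE).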
# 1) --- Correlation ---
ax = fig.add_subplot(2,3,pos[0])
mean_stat = np.zeros([nbperiods])
std_stat = np.zeros([nbperiods])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
proxy_types = proxy
tmp = [workdict[p][k]['MCensCorr'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
stat = [item for sublist in tmp for item in sublist] # flatten list of lists
nbdata = len(stat)
mean_stat[p] = np.mean(stat)
std_stat[p] = np.std(stat)
results, edges = np.histogram(stat, bins=bins_corr, density=True)
plt.bar(edges[:-1]+binwidth/2,results,binwidth,color=fcolor[p],alpha=alpha,linewidth=0,align="center")
# kernel density estimation
statv = np.asarray(stat)
kde = sm.nonparametric.KDEUnivariate(statv)
nbpts, = statv.shape
if nbpts > 0:
kde.fit(kernel='gau')
plt.plot(kde.support,kde.density,color=fcolor[p],lw=2,label=str(verif_period[p][0])+' to '+str(verif_period[p][1]))
stat_comp.append(stat)
# Accumulate prior stat
tmp = [workdict[p][k]['PriorMCensCorr'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
prior_tmp.append([item for sublist in tmp for item in sublist]) # flatten list of lists
# Kolmogorov-Smirnov significance testing of difference between distributions from both tested periods
nbdist = len(stat_comp)
if nbdist > 1:
dist_test = stats.ks_2samp(stat_comp[0],stat_comp[1])
#print('Corr: %f %f' %(dist_test.statistic, dist_test.pvalue))
xmind,xmaxd,ymind,ymaxd = plt.axis()
prior_corr = [item for sublist in prior_tmp for item in sublist]
results, edges = np.histogram(prior_corr, bins=bins_corr, density=True)
plt.plot(edges[:-1]+binwidth,results,linewidth=1,ls='steps',color='black',label='Prior')
plt.xlabel("Correlation",fontweight='bold')
plt.ylabel("Probability density",fontweight='bold')
ymin = 0.0
#ymax = 0.04; nbins = 4
#ymax = 0.05; nbins = 5 # for r_crit = 0.2
#ymax = 0.1; nbins = 5
#ymax = 2.0; nbins = 5
if proxy == 'All':
ymax = 2.0; nbins = 5
else:
ymax = ymaxd
plt.axis((CORRrange[0],CORRrange[1],ymin,ymax))
plt.locator_params(axis = 'y', nbins = nbins)
plt.legend(loc=2,fontsize=9,frameon=False,handlelength=1.2)
xmin,xmax,ymin,ymax = plt.axis()
xpos = xmin+0.025*(xmax-xmin)
ypos = ymin+0.5*(ymax-ymin)
for p in range(nbperiods):
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(mean_stat[p]),fontsize=10,fontweight='bold',color=fcolor[p])
ypos = ypos-0.075*(ymax-ymin)
if nbdist > 1:
plt.text(xpos,ypos,' p-value = %s' %"{:.3f}".format(dist_test.pvalue),fontsize=9,fontweight='bold')
ypos = ypos-0.075*(ymax-ymin)
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.mean(prior_corr)),fontsize=10,fontweight='bold')
# 2) --- CE ---
ax = fig.add_subplot(2,3,pos[1])
mean_stat = np.zeros([nbperiods])
std_stat = np.zeros([nbperiods])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
                    proxy_types = [proxy]
tmp = [workdict[p][k]['MCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
stat = [item for sublist in tmp for item in sublist] # flatten list of lists
nbdata = len(stat)
mean_stat[p] = np.mean(stat)
std_stat[p] = np.std(stat)
# Since CE is not bounded at the lower end, assign values smaller than 1st bin to value of 1st bin
#stat = [bins[0] if x<bins[0] else x for x in stat]
                results, edges = np.histogram(stat, bins=bins_ce, density=True)
plt.bar(edges[:-1],results,binwidth,color=fcolor[p],alpha=alpha,linewidth=0)
# kernel density estimation
statv = np.asarray(stat)
kde = sm.nonparametric.KDEUnivariate(statv)
nbpts, = statv.shape
if nbpts > 0:
kde.fit(kernel='gau')
plt.plot(kde.support,kde.density,color=fcolor[p],lw=2,label=str(verif_period[p][0])+' to '+str(verif_period[p][1]))
stat_comp.append(stat)
# Accumulate prior stat
tmp = [workdict[p][k]['PriorMCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
prior_tmp.append([item for sublist in tmp for item in sublist]) # flatten list of lists
# Kolmogorov-Smirnov significance testing of difference between distributions from both tested periods
nbdist = len(stat_comp)
if nbdist > 1:
dist_test = stats.ks_2samp(stat_comp[0],stat_comp[1])
#print('CE: %f %f' %(dist_test.statistic, dist_test.pvalue))
prior_ce = [item for sublist in prior_tmp for item in sublist]
# Since CE is not bounded at the lower end, assign values smaller than 1st bin to value of 1st bin
prior_ce = [bins_ce[0] if x<bins_ce[0] else x for x in prior_ce]
            results, edges = np.histogram(prior_ce, bins=bins_ce, density=True)
            plt.plot(edges[:-1]+binwidth,results,linewidth=1,drawstyle='steps',color='black',label='Prior')
plt.xlabel("Coefficient of efficiency",fontweight='bold')
plt.ylabel("Probability density",fontweight='bold')
xmin,xmax,ymin,ymax = plt.axis()
ymin = 0.0
#ymax = 0.45
#ymax = 0.1 # for r_crit = 0.2
#ymax = 0.5; nbins = 5
ymax = 12.0; nbins = 6
plt.axis((CErange[0],CErange[1],ymin,ymax))
plt.legend(loc=2,fontsize=9,frameon=False,handlelength=1.2)
xmin,xmax,ymin,ymax = plt.axis()
xpos = xmin+0.025*(xmax-xmin)
ypos = ymin+0.5*(ymax-ymin)
for p in range(nbperiods):
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(mean_stat[p]),fontsize=10,fontweight='bold',color=fcolor[p])
ypos = ypos-0.075*(ymax-ymin)
if nbdist > 1:
plt.text(xpos,ypos,' p-value = %s' %"{:.3f}".format(dist_test.pvalue),fontsize=9,fontweight='bold')
ypos = ypos-0.075*(ymax-ymin)
plt.text(xpos,ypos,'Mean = %s' %"{:.2f}".format(np.mean(prior_ce)),fontsize=10,fontweight='bold')
# 3) --- Change in CE from prior to posterior ---
ax = fig.add_subplot(2,3,pos[2])
prior_tmp = []
stat_comp = []
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sitetag = list(workdict[p].keys())
if proxy == 'All':
proxy_types = list(set([item[0] for item in sitetag]))
else:
                    proxy_types = [proxy]
tmpPost = [workdict[p][k]['MCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
tmpPrior = [workdict[p][k]['PriorMCensCE'] for k in sitetag if k[0] in proxy_types and np.abs(workdict[p][k]['PSMinfo']['corr'])>=r_crit]
statPost = [item for sublist in tmpPost for item in sublist] # flatten list of lists
statPrior = [item for sublist in tmpPrior for item in sublist] # flatten list of lists
# difference
stat = [statPost[i]-statPrior[i] for i in range(len(statPost))]
nbdata = len(stat)
mean_stat = np.mean(stat)
std_stat = np.std(stat)
# % of positive change
dCEplus = [stat[i] for i in range(len(stat)) if stat[i] > 0.0]
if nbdata > 0:
frac = int(float(len(dCEplus))/float(len(stat))*100.)
fractiondCEplus = str(int('%d' % frac ))
else:
fractiondCEplus = 'n/a'
print('CE_stats: period= ', str('%12s' %verif_period[p]), ' category= ', v, ':', str('%8s' %str(len(dCEplus))), str('%8s' %str(len(stat))), \
' Fraction of +change:', fractiondCEplus, '%')
                results, edges = np.histogram(stat, bins=bins_ce, density=True)
leg = str(verif_period[p][0])+' to '+str(verif_period[p][1])+' : % +change='+str(fractiondCEplus)
plt.bar(edges[:-1],results,binwidth,color=fcolor[p],alpha=alpha,linewidth=0)
# kernel density estimation
statv = np.asarray(stat)
kde = sm.nonparametric.KDEUnivariate(statv)
nbpts, = statv.shape
if nbpts > 0:
kde.fit(kernel='gau')
plt.plot(kde.support,kde.density,color=fcolor[p],lw=2,label=leg)
stat_comp.append(stat)
# Kolmogorov-Smirnov significance testing of difference between distributions from both periods
nbdist = len(stat_comp)
if nbdist > 1:
dist_test = stats.ks_2samp(stat_comp[0],stat_comp[1])
#print('deltaCE: %f %f' %(dist_test.statistic, dist_test.pvalue))
plt.xlabel("Change in coefficient of efficiency",fontweight='bold')
plt.ylabel("Probability density",fontweight='bold')
xmin,xmax,ymin,ymax = plt.axis()
ymin = 0.0
ymax = 8.0; nbins = 5
plt.axis((CEchangerange[0],CEchangerange[1],ymin,ymax))
plt.legend(loc=2,fontsize=9,frameon=False,handlelength=1.2)
if nbdist > 1:
xmin,xmax,ymin,ymax = plt.axis()
xpos = xmin+0.025*(xmax-xmin)
ypos = ymin+0.75*(ymax-ymin)
plt.text(xpos,ypos,' p-value = %s' %"{:.3f}".format(dist_test.pvalue),fontsize=9,fontweight='bold')
irow = irow + 1
fig.tight_layout()
if proxy == 'All':
proxy_tag = 'Allproxies'
else:
proxy_tag = proxy.replace(' ','_')
plt.savefig('%s/%s_verify_proxy_hist_corr_ce_%s.png' % (figdir,nexp,proxy_tag),bbox_inches='tight')
if make_pdfs:
plt.savefig('%s/%s_verify_proxy_hist_corr_ce_%s.pdf' % (figdir,nexp,proxy_tag),bbox_inches='tight',dpi=300, format='pdf')
plt.close()
# ==========================================================================
# PART 2: MAPS of site-based verification metrics --------------------------
# ==========================================================================
if make_plots_maps:
#water = '#9DD4F0'
#continents = '#888888'
water = '#D3ECF8'
continents = '#F2F2F2'
# Loop over proxy sets (assim vs verif)
for v in list(vtype.keys()):
# Loop over verification periods
for p in range(nbperiods):
# pick right dict and associate to "workdict"
dname = v+'_dict'
workdict = eval(dname)
sites = list(workdict[p].keys())
            verif_period_label = str(verif_period[p][0])+'-'+str(verif_period[p][1])
proxy_types = []
for sitetag in sites:
sitetype = sitetag[0]
if sitetype not in proxy_types:
proxy_types.append(sitetype)
proxy_types = sorted(proxy_types)
m = Basemap(projection='robin', lat_0=0, lon_0=0,resolution='l', area_thresh=700.0); latres = 20.; lonres=40. # GLOBAL
x, y = m(0.,0.)
l = []
for sitetype in sorted(proxy_types):
                l.append(m.scatter(x,y,35,c='white',marker=proxy_verif[sitetype],edgecolor='black',linewidth=1))
# ===========================================================================
# 2) Maps with proxy sites plotted with dots colored according to correlation
# ===========================================================================
verif_metric = 'Correlation'
mapcolor = plt.cm.seismic
cbarfmt = '%4.1f'
fmin = -1.0; fmax = 1.0
fval = np.linspace(fmin, fmax, 100); fvalc = np.linspace(0, fmax, 101);
scaled_colors = mapcolor(fvalc)
cmap, norm = from_levels_and_colors(levels=fval, colors=scaled_colors, extend='both')
cbarticks=np.linspace(fmin,fmax,11)
fig = plt.figure(figsize=[8,5])
#ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(projection='robin', lat_0=0, lon_0=0,resolution='l', area_thresh=700.0); latres = 20.; lonres=40. # GLOBAL
m.drawmapboundary(fill_color=water)
m.drawcoastlines(linewidth=0.5); m.drawcountries(linewidth=0.5)
m.fillcontinents(color=continents,lake_color=water)
m.drawparallels(np.arange(-80.,81.,latres),linewidth=0.5)
m.drawmeridians(np.arange(-180.,181.,lonres),linewidth=0.5)
# loop over proxy sites
for sitetag in sites:
sitetype = sitetag[0]
sitename = sitetag[1]
sitemarker = proxy_verif[sitetype]
lat = workdict[p][sitetag]['lat']
lon = workdict[p][sitetag]['lon']
x, y = m(lon,lat)
                Gplt = m.scatter(x,y,35,c=workdict[p][sitetag]['MeanCorr'],marker=sitemarker,edgecolor='black',linewidth=1,zorder=4,cmap=cmap,norm=norm)
cbar = m.colorbar(Gplt,location='right',pad="2%",size="2%",ticks=cbarticks,format=cbarfmt,extend='both')
cbar.outline.set_linewidth(1.0)
cbar.set_label('%s' % verif_metric,size=11,weight='bold')
cbar.ax.tick_params(labelsize=10)
plt.title('Period: '+verif_period_label+' : '+vtype[v],fontweight='bold')
plt.legend(l,proxy_types,
scatterpoints=1,
loc='lower center', bbox_to_anchor=(0.5, -0.30),
ncol=3,
fontsize=9)
plt.savefig('%s/%s_verify_proxy_map_%s_corr_%s.png' % (figdir,nexp,v,verif_period_label),bbox_inches='tight')
if make_pdfs:
plt.savefig('%s/%s_verify_proxy_map_%s_corr_%s.pdf' % (figdir,nexp,v,verif_period_label),bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# ===========================================================================
# 3) Maps with proxy sites plotted with dots colored according to CE
# ===========================================================================
verif_metric = 'Coefficient of efficiency'
mapcolor = plt.cm.seismic
cbarfmt = '%4.1f'
fmin = -1.0; fmax = 1.0
fval = np.linspace(fmin, fmax, 100); fvalc = np.linspace(0, fmax, 101);
scaled_colors = mapcolor(fvalc)
cmap, norm = from_levels_and_colors(levels=fval, colors=scaled_colors, extend='both')
cbarticks=np.linspace(fmin,fmax,11)
# Prior & Posterior
fig = plt.figure(figsize=[8,10])
dplot = {'Prior':'PriorMeanCE', 'Posterior':'MeanCE'}
irow = 1
for dd in list(dplot.keys()):
ax = fig.add_subplot(2,1,irow)
m = Basemap(projection='robin', lat_0=0, lon_0=0,resolution='l', area_thresh=700.0); latres = 20.; lonres=40. # GLOBAL
m.drawmapboundary(fill_color=water)
m.drawcoastlines(linewidth=0.5); m.drawcountries(linewidth=0.5)
m.fillcontinents(color=continents,lake_color=water)
m.drawparallels(np.arange(-80.,81.,latres),linewidth=0.5)
m.drawmeridians(np.arange(-180.,181.,lonres),linewidth=0.5)
# loop over proxy sites
for sitetag in sites:
sitetype = sitetag[0]
sitename = sitetag[1]
sitemarker = proxy_verif[sitetype]
lat = workdict[p][sitetag]['lat']
lon = workdict[p][sitetag]['lon']
x, y = m(lon,lat)
plot_var = dplot[dd]
                    Gplt = m.scatter(x,y,35,c=workdict[p][sitetag][plot_var],marker=sitemarker,edgecolor='black',linewidth=1,zorder=4,cmap=cmap,norm=norm)
cbar = m.colorbar(Gplt,location='right',pad="2%",size="2%",ticks=cbarticks,format=cbarfmt,extend='both')
cbar.outline.set_linewidth(1.0)
cbar.set_label('%s' % verif_metric,size=11,weight='bold')
cbar.ax.tick_params(labelsize=10)
if irow == 1:
plt.title('Period: '+verif_period_label+'\n\n'+vtype[v]+' : '+ dd,fontweight='bold')
else:
plt.title(vtype[v]+' : '+ dd,fontweight='bold')
irow = irow + 1
plt.legend(l,proxy_types,
scatterpoints=1,
loc='lower center', bbox_to_anchor=(0.5, -0.30),
ncol=3,
fontsize=9)
fig.tight_layout()
plt.savefig('%s/%s_verify_proxy_map_%s_ce_%s.png' % (figdir,nexp,v,verif_period_label),bbox_inches='tight')
if make_pdfs:
plt.savefig('%s/%s_verify_proxy_map_%s_ce_%s.pdf' % (figdir,nexp,v,verif_period_label),bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# ============================================================================
# 4) Maps with proxy sites plotted with dots colored according to change in CE
# ============================================================================
# Change in CE from Prior to Posterior
fig = plt.figure(figsize=[8,5])
m = Basemap(projection='robin', lat_0=0, lon_0=0,resolution='l', area_thresh=700.0); latres = 20.; lonres=40. # GLOBAL
m.drawmapboundary(fill_color=water)
m.drawcoastlines(linewidth=0.5); m.drawcountries(linewidth=0.5)
m.fillcontinents(color=continents,lake_color=water)
m.drawparallels(np.arange(-80.,81.,latres),linewidth=0.5)
m.drawmeridians(np.arange(-180.,181.,lonres),linewidth=0.5)
# loop over proxy sites
for sitetag in sites:
sitetype = sitetag[0]
sitename = sitetag[1]
sitemarker = proxy_verif[sitetype]
lat = workdict[p][sitetag]['lat']
lon = workdict[p][sitetag]['lon']
x, y = m(lon,lat)
plot_var = workdict[p][sitetag]['MeanCE'] - workdict[p][sitetag]['PriorMeanCE']
                Gplt = m.scatter(x,y,35,c=plot_var,marker=sitemarker,edgecolor='black',linewidth=1,zorder=4,cmap=cmap,norm=norm)
cbar = m.colorbar(Gplt,location='right',pad="2%",size="2%",ticks=cbarticks,format=cbarfmt,extend='both')
cbar.outline.set_linewidth(1.0)
cbar.set_label('Change in coefficient of efficiency',size=11,weight='bold')
cbar.ax.tick_params(labelsize=10)
plt.title('Period: '+verif_period_label+' : '+vtype[v],fontweight='bold')
plt.legend(l,proxy_types,
scatterpoints=1,
loc='lower center', bbox_to_anchor=(0.5, -0.30),
ncol=3,
fontsize=9)
fig.tight_layout()
plt.savefig('%s/%s_verify_proxy_map_%s_delta_ce_%s.png' % (figdir,nexp,v,verif_period_label),bbox_inches='tight')
if make_pdfs:
plt.savefig('%s/%s_verify_proxy_map_%s_delta_ce_%s.pdf' % (figdir,nexp,v,verif_period_label),bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# ==========================================================================================
# 5) Maps with proxy sites plotted with dots colored according to ensemble calibration ratio
# ==========================================================================================
verif_metric = 'Ensemble calibration'
mapcolor = plt.cm.seismic
cbarfmt = '%4.1f'
fmin = 0.0; fmax = 2.0
fval = np.linspace(fmin, fmax, 100); fvalc = np.linspace(0, 1, 100);
scaled_colors = mapcolor(fvalc)
cmap, norm = from_levels_and_colors(levels=fval, colors=scaled_colors, extend='max')
cbarticks=np.linspace(fmin,fmax,11)
# Prior & Posterior
fig = plt.figure(figsize=[8,10])
dplot = {'Prior':'PriorMeanCalRatio', 'Posterior':'MeanCalRatio'}
irow = 1
for dd in list(dplot.keys()):
ax = fig.add_subplot(2,1,irow)
m = Basemap(projection='robin', lat_0=0, lon_0=0,resolution='l', area_thresh=700.0); latres = 20.; lonres=40. # GLOBAL
m.drawmapboundary(fill_color=water)
m.drawcoastlines(linewidth=0.5); m.drawcountries(linewidth=0.5)
m.fillcontinents(color=continents,lake_color=water)
m.drawparallels(np.arange(-80.,81.,latres),linewidth=0.5)
m.drawmeridians(np.arange(-180.,181.,lonres),linewidth=0.5)
# loop over proxy sites
for sitetag in sites:
sitetype = sitetag[0]
sitename = sitetag[1]
sitemarker = proxy_verif[sitetype]
lat = workdict[p][sitetag]['lat']
lon = workdict[p][sitetag]['lon']
x, y = m(lon,lat)
plot_var = dplot[dd]
                    Gplt = m.scatter(x,y,35,c=workdict[p][sitetag][plot_var],marker=sitemarker,edgecolor='black',linewidth=1,zorder=4,cmap=cmap,norm=norm)
cbar = m.colorbar(Gplt,location='right',pad="2%",size="2%",ticks=cbarticks,format=cbarfmt,extend='max')
cbar.outline.set_linewidth(1.0)
cbar.set_label('%s' % verif_metric,size=11,weight='bold')
cbar.ax.tick_params(labelsize=10)
if irow == 1:
plt.title('Period: '+verif_period_label+'\n\n'+vtype[v]+' : '+ dd,fontweight='bold')
else:
plt.title(vtype[v]+' : '+ dd,fontweight='bold')
irow = irow + 1
plt.legend(l,proxy_types,
scatterpoints=1,
loc='lower center', bbox_to_anchor=(0.5, -0.30),
ncol=3,
fontsize=9)
fig.tight_layout()
plt.savefig('%s/%s_verify_proxy_map_%s_EnsCal_%s.png' % (figdir,nexp,v,verif_period_label),bbox_inches='tight')
if make_pdfs:
plt.savefig('%s/%s_verify_proxy_map_%s_EnsCal_%s.pdf' % (figdir,nexp,v,verif_period_label),bbox_inches='tight', dpi=300, format='pdf')
plt.close()
# ==========================================================================
# PART 3: Plots of individual time series of proxy records ----------------
# ==========================================================================
if make_plots_individual_sites:
assimilated_sites = sorted(assim_dict[p].keys())
withheld_sites = verif_dict[p].keys()
if nbperiods > 1:
period_bnds = [item[0] for item in verif_period]
else:
period_bnds = verif_period[0]
# Loop over sites
siteCount = 0
for site in assimilated_sites:
sitename = site[0].replace(' ','_')+'_'+site[1].replace(' ','_')
AssimPriorR = np.zeros([nbperiods],dtype=float)
AssimPriorCE = np.zeros([nbperiods],dtype=float)
AssimReconR = np.zeros([nbperiods],dtype=float)
AssimReconCE = np.zeros([nbperiods],dtype=float)
VerifPriorR = np.zeros([nbperiods],dtype=float)
VerifPriorCE = np.zeros([nbperiods],dtype=float)
VerifReconR = np.zeros([nbperiods],dtype=float)
VerifReconCE = np.zeros([nbperiods],dtype=float)
# setup the figure
fig, ax = plt.subplots(2,1, figsize=(10,6))
plt.subplots_adjust(bottom=0.35,hspace=0.4)
# loop over periods
for p in range(nbperiods):
# assimilated --
try:
                assimCE = '{:7.2f}'.format(np.mean(assim_dict[p][site]['MCensCE']))
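            # (the source truncates mid-try here; a minimal, assumed handler
            # so the block stays valid when a site lacks statistics)
            except KeyError:
                assimCE = '    n/a'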
import socket
import time
import numpy as np
import os
from src.algorithms.QDoubleDeepLearn import QLearn # can be QLearn, QDeepLearn, QDoubleDeepLearn or RandomAgent
from src.environments.jsbsim.JSBSimEnv import Env # can be jsbsim.JSBSimEnv or xplane.XPlaneEnv
from src.scenarios.deltaAttitudeControlScene import Scene # can be deltaAttitudeControlScene, sparseAttitudeControlScene or cheatingAttitudeControlScene
errors = 0.0 # counts everytime the UDP packages are lost on all retries
connectAttempts = 0.0 # counts everytime the UDP packages are lost on a single retry
experimentName = "Testing"
notes = "This experiment was run..." # add notes that will be saved to the setup file to clearify the experiment setup better
dateTime = str(time.ctime(time.time()))
dateTime = dateTime.replace(":", "-")
dateTime = dateTime.replace(" ", "_")
experimentName = experimentName + "-" + dateTime
timeStart = time.time() # used to measure time
timeEnd = time.time() # used to measure time
logPeriod = 100 # every so many epochs the metrics will be printed into the console
savePeriod = 25 # every so many epochs the table/model will be saved to a file
pauseDelay = 0.01 # time an action is being applied to the environment
logDecimals = 4 # sets decimals for np.arrays to X for printing
np.set_printoptions(precision=logDecimals) # sets decimals for np.arrays to X for printing
n_epochs = 5 # Number of generations
n_steps = 2_500 # Number of inputs per generation
n_actions = 4 # Number of possible inputs to choose from
n_states = 182 # Number of states
gamma = 0.95  # Discount rate, between 0 and 1. If 0, future rewards are ignored; the higher it is, the more the new Q estimate factors into the update of the Q value
lr = 0.0001  # Learning rate. If 0, the Q value never updates; the closer to 1, the more quickly the agent adopts the new Q value. At lr = 1 the updated value is exactly the newly calculated Q value, completely ignoring the previous one
epsilon = 0.0 # Starting Epsilon Rate, affects the exploration probability. Will decay
decayRate = 0.00001 # Rate at which epsilon will decay per step
epsilonMin = 0.1 # Minimum value at which epsilon will stop decaying
n_epochsBeforeDecay = 10 # number of games to be played before epsilon starts to decay
numOfInputs = 7 # Number of inputs fed to the model
stateDepth = 1 # Number of old observations kept for current state. State will consist of s(t) ... s(t_n)
minReplayMemSize = 1_000 # min size determines when the replay will start being used
replayMemSize = 100_000 # Max size for the replay buffer
batchSize = 256 # Batch size for the model
updateRate = 5 # update target model every so many episodes
startingOffset = 0 # is used if previous Results are loaded.
loadModel = True # will load "model.h5" for tf if True (model.npy for non-Deep)
loadMemory = False # will load "memory.pickle" if True
loadResults = False # will load "results.npy" if True
jsbRender = True # will send UDP data to flight gear for rendering if True
jsbRealTime = False # will slow down the physics to portrait real time rendering
usePredefinedSeeds = False # Sets seeds for tf, np and random for more replicable results (not fully replicable due to stochastic environments)
saveForAutoReload = False # Saves and overrides models, results and memory to the root
plotTest = True # Will plot roll, pitch and reward per episode
startingVelocity = 60
startingPitchRange = 10
startingRollRange = 15
randomDesiredState = True  # Set a new state to stabilize towards every episode
desiredPitchRange = 5
desiredRollRange = 5
rewardListSingleEpisode = []
pitchListSingleEpisode = []
rollListSingleEpisode = []
dictObservation = {
"lat": 0,
"long": 1,
"alt": 2,
"pitch": 3,
"roll": 4,
"yaw": 5,
"gear": 6}
dictAction = {
"pi+": 0,
"pi-": 1,
"ro+": 2,
"ro-": 3,
"ru+": 4,
"ru-": 5,
"no": 6}
dictErrors = {
"reset": 0,
"update": 0,
"step": 0}
dictRotation = {
"roll": 0,
"pitch": 1,
"yaw": 2,
"northVelo": 3,
"eastVelo": 4,
"verticalVelo": 5}
# -998->NO CHANGE
flightOrigin = [35.126, 126.809, 14_000, 0, 0, 0, 1] # Gwangju SK
flightDestination = [33.508, 126.487, 6000, -998, -998, -998, 1]  # Jeju SK
# Other locations to use: Memmingen: [47.988, 10.240], Chicago: [41.976, -87.902]
fallbackState = [0] * numOfInputs # Used in case of connection error to XPlane
fallbackState = [tuple(fallbackState)]
# Will load previous results in case a experiment needs to be continued
if(loadResults):
movingEpRewards = np.load("results.npy", allow_pickle=True).item() # loads the file - .item() turns the loaded nparray back to a dict
startingOffset = np.max(movingEpRewards["epoch"]) # loads the episode where it previously stopped
    epsilon = np.min(movingEpRewards["epsilon"])  # resumes from the lowest epsilon previously reached
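
# A minimal sketch (assumed, not from the original script) of how the decay
# parameters above are typically applied once per step after the warm-up
# epochs; "decay_epsilon" is a hypothetical helper name.
def decay_epsilon(eps, epoch):
    if epoch >= n_epochsBeforeDecay and eps > epsilonMin:
        eps = max(epsilonMin, eps - decayRate)
    return eps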
"""
pyart.retrieve.velocity_azimuth_display
=======================================
Retrieval of VADs from a radar object.
This code is adapted from code written by <NAME>.
.. autosummary::
    :toctree: generated/
:template: dev_template.rst
velocity_azimuth_display
_vad_calculation
_inverse_dist_squared
_Average1D
"""
import numpy as np
from pyart.core import HorizontalWindProfile
def velocity_azimuth_display(
radar, velocity, z_want=None,
valid_ray_min=16, gatefilter=None, window=2,
weight='equal'):
"""
Velocity azimuth display.
Note: This code uses only one sweep. Before using the
velocity_azimuth_display function, use, for example:
one_sweep_radar = radar.extract_sweeps([0])
Parameters
----------
radar : Radar
Radar object used.
velocity : string
Velocity field to use for VAD calculation.
Other Parameters
----------------
z_want : array
Array of desired heights to be sampled for the vad
calculation.
valid_ray_min : int
Amount of rays required to include that level in
the VAD calculation.
gatefilter : GateFilter
A GateFilter indicating radar gates that should be excluded when
from the import vad calculation.
window : int
Value to use for window when determing new values in the
_Averag1D function.
weight : string
A string to indicate weighting method to use. 'equal' for
equal weighting when interpolating or 'idw' for inverse
distribution squared weighting for interpolating.
Default is 'equal'.
Returns
-------
height : array
Heights in meters above sea level at which horizontal winds were
sampled.
speed : array
Horizontal wind speed in meters per second at each height.
direction : array
Horizontal wind direction in degrees at each height.
u_wind : array
U-wind mean in meters per second.
v_wind : array
V-wind mean in meters per second.
Reference
----------
    Browning, K. A., and R. Wexler, 1968: The Determination
    of Kinematic Properties of a Wind Field Using Doppler
    Radar. J. Appl. Meteor., 7, 105–113.
"""
velocities = radar.fields[velocity]['data']
if gatefilter is not None:
velocities = np.ma.masked_where(
gatefilter.gate_excluded, velocities)
azimuths = radar.azimuth['data'][:]
elevation = radar.fixed_angle['data'][0]
u_wind, v_wind = _vad_calculation(velocities, azimuths,
elevation, valid_ray_min)
bad = np.logical_or(np.isnan(u_wind), np.isnan(v_wind))
good_u_wind = u_wind[~bad]
good_v_wind = v_wind[~bad]
radar_height = radar.gate_z['data'][0]
good_height = radar_height[~bad]
    if z_want is None:
        z_want = np.linspace(0, 1000, 100)[:50]
try:
print('max height', np.max(good_height), ' meters')
print('min height', np.min(good_height), ' meters')
except ValueError:
raise ValueError('Not enough data in this radar sweep ' \
'for a vad calculation.')
    u_interp = _Average1D(good_height, good_u_wind,
                          (z_want[1] - z_want[0]) / window, weight)
    v_interp = _Average1D(good_height, good_v_wind,
                          (z_want[1] - z_want[0]) / window, weight)
u_wanted = u_interp(z_want)
v_wanted = v_interp(z_want)
u_wanted = np.ma.masked_equal(u_wanted, 99999.)
v_wanted = np.ma.masked_equal(v_wanted, 99999.)
vad = HorizontalWindProfile.from_u_and_v(
z_want, u_wanted, v_wanted)
return vad
def _vad_calculation(velocities, azimuths,
elevation, valid_ray_min):
""" Calculates VAD for a scan and returns u_mean and
v_mean. velocities is a 2D array, azimuths is a 1D
array, elevation is a number.
Note:
We need to solve: Ax = b
where:
A = [sum_sin_squared_az, sum_sin_cos_az ] = [a, b]
[sum_sin_cos_az, sum_cos_squared_az] [c, d]
b = [sum_sin_vel_dev] = [b_1]
[sum_cos_vel_dev] [b_2]
The solution to this is:
x = A-1 * b
A-1 is:
1 [ d, -b ]
--- * [ -c, a ]
|A|
and the determinate, det is: det = a*d - b*c
Therefore the elements of x are:
x_1 = (d* b_1 + -b * b_2) / det = (d*b_1 - b*b_2) / det
x_2 = (-c * b_1 + a * b_2) / det = (a*b_2 - c*b_1) / det
"""
velocities = velocities.filled(np.nan)
shape = velocities.shape
_, nbins = velocities.shape
invalid = np.isnan(velocities)
valid_rays_per_gate = np.sum(~np.isnan(velocities), axis=0)
too_few_valid_rays = valid_rays_per_gate < valid_ray_min
invalid[:, too_few_valid_rays] = True
sin_az = np.sin(np.deg2rad(azimuths))
cos_az = np.cos(np.deg2rad(azimuths))
sin_az = np.repeat(sin_az, nbins).reshape(shape)
cos_az = np.repeat(cos_az, nbins).reshape(shape)
sin_az[invalid] = np.nan
cos_az[invalid] = np.nan
mean_velocity_per_gate = np.nanmean(velocities, axis=0).reshape(1, -1)
velocity_deviation = velocities - mean_velocity_per_gate
sum_cos_vel_dev = np.nansum(cos_az * velocity_deviation, axis=0)
sum_sin_vel_dev = np.nansum(sin_az * velocity_deviation, axis=0)
sum_sin_cos_az = np.nansum(sin_az * cos_az, axis=0)
    sum_sin_squared_az = np.nansum(sin_az**2, axis=0)
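    sum_cos_squared_az = np.nansum(cos_az**2, axis=0)
    # (assumed completion -- the source truncates here; this solves the 2x2
    # system from the docstring for each range gate and then projects the
    # radial components onto the horizontal with cos(elevation))
    determinant = (sum_sin_squared_az * sum_cos_squared_az
                   - sum_sin_cos_az ** 2)
    u_mean = (sum_cos_squared_az * sum_sin_vel_dev
              - sum_sin_cos_az * sum_cos_vel_dev) / determinant
    v_mean = (sum_sin_squared_az * sum_cos_vel_dev
              - sum_sin_cos_az * sum_sin_vel_dev) / determinant
    scale = np.cos(np.deg2rad(elevation))
    return u_mean / scale, v_mean / scale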
import numpy
import pytest
from numpy import cos, pi, sin
import quadpy
def test_simple():
val, _ = quadpy.line_segment.integrate_adaptive(sin, [0.0, pi], 1.0e-10)
exact = 2.0
assert abs(exact - val) < 1.0e-10
val, _ = quadpy.line_segment.integrate_adaptive(
lambda x: x * sin(x), [0.0, pi], 1.0e-10
)
exact = pi
assert abs(exact - val) < 1.0e-10
@pytest.mark.parametrize("k", range(1, 6))
def test_vector_valued(k):
    # We need to set eps_rel=None here since the second integral can be 0,
    # which would make a relative stopping criterion unreachable.
val, err = quadpy.line_segment.integrate_adaptive(
lambda x: [x * sin(k * x), x * cos(k * x)], [0.0, pi], 1.0e-10, eps_rel=None
)
exact = [
(sin(pi * k) - pi * k * cos(pi * k)) / k ** 2,
(cos(pi * k) + pi * k * sin(pi * k) - 1.0) / k ** 2,
]
assert numpy.all(err < 1.0e-10)
assert numpy.all(numpy.abs(exact - val) < 1.0e-10)
def test_multidim():
# simple scalar integration
val, err = quadpy.line_segment.integrate_adaptive(sin, [0.0, 1.0])
assert err < 1.0e-10
assert val.shape == ()
exact = 1.0 - cos(1.0)
assert abs(val - exact) < 1.0e-10
# scalar integration on 3 subdomains
val, err = quadpy.line_segment.integrate_adaptive(
sin, [[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]]
)
assert err.shape == (3,)
assert all(e < 1.0e-10 for e in err)
assert val.shape == (3,)
exact = [cos(0.0) - cos(1.0), cos(1.0) - cos(2.0), cos(2.0) - cos(3.0)]
assert all(abs(v - ex) < 1.0e-10 for v, ex in zip(val, exact))
# scalar integration in 3D
alpha = 10.31
val, err = quadpy.line_segment.integrate_adaptive(
lambda x: sin(alpha * x[0]), [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
)
assert err < 1.0e-10
assert val.shape == ()
exact = -1 / alpha * (cos(alpha * 1.0) - cos(alpha * 0.0))
assert abs(val - exact) < 1.0e-10
# vector-valued integration on 1 subdomain
val, err = quadpy.line_segment.integrate_adaptive(
lambda x: [sin(x), cos(x)], [0.0, 1.0]
)
assert err.shape == (2,)
assert all(e < 1.0e-10 for e in err)
exact = [cos(0.0) - cos(1.0), sin(1.0) - sin(0.0)]
assert val.shape == (2,)
assert all(abs(v - ex) < 1.0e-10 for v, ex in zip(val, exact))
# vector-valued integration on 3 subdomains
val, err = quadpy.line_segment.integrate_adaptive(
lambda x: [sin(x), cos(x)], [[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]]
)
assert err.shape == (2, 3)
assert numpy.all(err < 1.0e-10)
assert val.shape == (2, 3)
exact = [
[cos(0.0) - cos(1.0), cos(1.0) - cos(2.0), cos(2.0) - cos(3.0)],
[sin(1.0) - sin(0.0), sin(2.0) - sin(1.0), sin(3.0) - sin(2.0)],
]
assert numpy.all(numpy.abs(val - exact) < 1.0e-10)
# vector-valued integration in 3D
val, err = quadpy.line_segment.integrate_adaptive(
        lambda x: [x[0] + sin(x[1]), cos(x[0])],
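        # (the source truncates here; assumed domain and checks mirroring the
        # scalar 3D case above -- along this segment x[1] is 0, so the exact
        # integrals are [1/2, sin(1)])
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
    )
    assert err.shape == (2,)
    assert numpy.all(err < 1.0e-10)
    exact = [0.5, sin(1.0)]
    assert numpy.all(numpy.abs(numpy.array(exact) - val) < 1.0e-10)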
"""
Collection of quick and simple plotting functions
horizontal_map - driver to make two nice horizontal maps next to each other
depth_slice - same, but for contour plot of depth vs some coordinate
_nice_plot - underlying script for a single nice figure
"""
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import xarray as xr
from warnings import warn
try:
import ecco_v4_py as ecco
except ImportError:
print('You need to reorganize pych at some point')
from matplotlib.ticker import MultipleLocator
from .utils import get_cmap_rgb
def plot_logbin(xda,x=None, y=None,
nbins=3,bin_edges=None,
ax=None,
cmap='RdBu_r',
cbar_label=None,
**kwargs):
"""Make a plot, binning field by log10 of values
Parameters
----------
xda : xarray.DataArray
field to be plotted, must be 2D
x, y : array_like, optional
x and y coordinates for the plot
nbins : int, optional
number of colored bin (centers) positive and negative values
i.e. we get 2*nbins+1, bins. one is neutral (middle)
bin_edges : array-like, optional
exclusive with nbins, specify bin edges (positive only)
ax : matplotlib.axes, optional
to make plot at
cmap : str, optional
specifies colormap
cbar_label : str, optional
label for colorbar, default grabs units from DataArray
kwargs
passed to matpotlib.pyplot.contourf
Returns
-------
ax : matplotlib.axes
if one is not provided
"""
return_ax = False
if ax is None:
_,ax = plt.subplots()
return_ax=True
if nbins is not None and bin_edges is not None:
raise TypeError('one or the other')
log = np.log10(np.abs(xda))
log = log.where((~np.isnan(log)) & (~np.isinf(log)),0.)
if nbins is not None:
_,bin_edges = np.histogram(log,bins=nbins)
else:
nbins = len(bin_edges)-1
logbins=np.round(bin_edges)
# determine if colorbar will be extended
maxExtend = (xda>10**logbins[-1]).any().values
minExtend = (xda<-10**logbins[-1]).any().values
extend='neither'
if minExtend and maxExtend:
extend='both'
elif maxExtend:
extend='max'
elif minExtend:
extend='min'
# determine number of colors, adding one for each extension
# and always one extra, the middle color bin
ncolors=2*nbins+1
ncolors = ncolors+1 if maxExtend else ncolors
ncolors = ncolors+1 if minExtend else ncolors
# if only one end is extended,
# chop off the extreme value from the other end to fit
# in the middle (neutral) colorbin
if extend in ['min' ,'max']:
cmap = get_cmap_rgb(cmap,ncolors+1)
bot = np.arange(1,nbins+1) if extend=='max' else np.arange(0,nbins+1)
top = np.arange(ncolors-nbins,ncolors) if extend=='min' else np.arange(ncolors-nbins,ncolors+1)
index = list(bot)+[nbins+1]+list(top)
cmap = cmap[index,:]
else:
cmap=get_cmap_rgb(cmap,ncolors)
# levels and plot
levels=10**logbins
levels = np.concatenate([-levels[::-1],levels],axis=0)
if x is None or y is None:
im=ax.contourf(xda,levels=levels,colors=cmap,extend=extend,**kwargs)
else:
im=ax.contourf(x,y,xda,levels=levels,colors=cmap,extend=extend,**kwargs)
# label dem ticks
if cbar_label==None and 'units' in xda.attrs:
cbar_label=f'[{xda.attrs["units"]}]'
p=plt.colorbar(im,ax=ax,label=cbar_label)
ticklabels = [f'-10^{b:.0f}' for b in logbins[::-1]]
ticklabels += [f'10^{b:.0f}' for b in logbins]
p.set_ticklabels(ticklabels)
if return_ax:
return ax
else:
return im
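# Example usage (illustrative; the random field below is not from the module):
#   da = xr.DataArray(np.random.randn(50, 50) * 10.0**np.random.randint(-3, 4, (50, 50)))
#   ax = plot_logbin(da, nbins=4, cbar_label='[m/s]')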
def nice_inward_ticks(ax,
xminor_skip=None,yminor_skip=None):
"""Make nice inward pointing ticks
Parameters
----------
ax : matplotlib axis object
xminor_skip, yminor_skip : int, optional
interval of "minor" ticks, if None, then no minor ticks
"""
ax.tick_params(direction='in',which='major',length=8,
top=True,right=True,pad=6)
if xminor_skip is not None:
ax.xaxis.set_minor_locator(MultipleLocator(xminor_skip))
if yminor_skip is not None:
ax.yaxis.set_minor_locator(MultipleLocator(yminor_skip))
if xminor_skip is not None or yminor_skip is not None:
top = xminor_skip is not None
right = yminor_skip is not None
ax.tick_params(direction='in',which='minor',length=5,
top=top,right=right,pad=6)
def fill_between_std(x,ymean,ystd,
ax=None,fill_alpha=0.4,**kwargs):
"""A simple version of fill between to reduce typing"""
fill_kwargs = copy(kwargs)
if 'alpha' in kwargs:
warn(f'Resetting fill_alpha with provided alpha={kwargs["alpha"]}')
fill_kwargs['alpha'] = kwargs['alpha']
else:
fill_kwargs['alpha'] = fill_alpha
ax.plot(x,ymean,**kwargs) if ax is not None else plt.plot(x,ymean,**kwargs)
ax.fill_between(x,ymean-ystd,ymean+ystd,**fill_kwargs) if ax is not None else \
plt.fill_between(x,ymean-ystd,ymean+ystd,**fill_kwargs)
def plot_section(fld, left, right,
datasets, grids,
labels=None,
collapse_dim='x',
plot_diff0=False,
plot_sections_at_bottom=False,
single_plot=False,
nrows=None,
ncols=5,
fig=None,
xr_kwargs={}):
"""Plot a field in each dataset provided along a section in the domain
Parameters
----------
fld : str
string denoting field to grab in each dataset
left, right : pair of floats
denoting in longitude/latitude the coordinates of the left and rightmost
points to get a section of
datasets : list of xarray Datasets
containing all the data
grids : list of or a single xgcm Grid object(s)
this allows one to get a section of the data
use a single grid if all datasets have same grid information
labels : list of strings, optional
corresponding to the different datasets to label in figure
collapse_dim : str, optional
dimension along which to collapse
plot_diff0 : bool, optional
plot difference between first dataset and all others
plot_sections_at_bottom : bool, optional
if True, add a row at the bottom showing the section line
for each field
single_plot : bool, optional
if True, plot all fields on one plot, better be 1D
ncols : int, optional
changes the relative width of the quantity being plotted
and the rightmost plot showing the section
fig : matplotlib figure object, optional
for a different figure size
xr_kwargs : dict, optional
arguments to pass to xarray's plotting wrapper
Returns
-------
fig : matplotlib figure object
axs : matplotlib axis object(s)
"""
# setup the plot
if not single_plot:
nrows = len(datasets) if not plot_sections_at_bottom else len(datasets)+1
else:
nrows = 1 if not plot_sections_at_bottom else 2
ncols = ncols if not plot_sections_at_bottom else len(datasets)
fig = plt.figure(figsize=(18,6*nrows)) if fig is None else fig
axs = []
gs = fig.add_gridspec(nrows,ncols)
# handle list or single
datasets = [datasets] if not isinstance(datasets,list) else datasets
grids = [grids] if not isinstance(grids,list) else grids
labels = [labels] if not isinstance(labels,list) else labels
# assumption: same grid for all datasets if length 1
if len(grids)==1:
grids = grids*nrows
if len(labels)==1:
labels = labels*nrows
# set colormap for depth plot with section
cmap_deep = copy(plt.get_cmap('cmo.deep'))
cmap_deep.set_bad('gray')
if single_plot:
ax = fig.add_subplot(gs[0,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[0,:])
axs.append(ax)
for i,(ds,g,lbl) in enumerate(zip(datasets,grids,labels)):
# what to plot
plotme = ds[fld] - datasets[0][fld] if plot_diff0 and i!=0 else ds[fld]
# get the section as a mask
m={}
m['C'],m['W'],m['S'] = ecco.get_section_line_masks(left,right,ds,g)
# get coordinates for field
x,y,mask,sec_mask = _get_coords_and_mask(ds[fld].coords,m)
# replace collapse dim with actual name
rm_dim = x if collapse_dim == 'x' else y
# get mask and field
mask = mask.where(sec_mask,drop=True).mean(rm_dim)
plotme = plotme.where(sec_mask,drop=True).mean(rm_dim).where(mask)
# Plot the field
if len(plotme.dims)>1:
if single_plot:
                raise TypeError("Can't put multiple fields on single plot")
ax = fig.add_subplot(gs[i,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[i,:])
axs.append(ax)
plotme.plot.contourf(y='Z',ax=ax,**xr_kwargs)
else:
if not single_plot:
ax = fig.add_subplot(gs[i,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[i,:])
axs.append(ax)
plot_dim = x if rm_dim==y else y
plotme.plot.line(x=plot_dim,ax=ax,label=lbl,**xr_kwargs)
ax.grid()
if lbl is not None:
if not single_plot:
if plot_diff0 and i!=0:
ax.set_title(f'{fld}({lbl}) - {fld}({labels[0]})')
else:
ax.set_title(f'{fld}({lbl})')
else:
ax.legend()
# Plot the section
axb = fig.add_subplot(gs[i,-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[-1,i])
datasets[i].Depth.where(datasets[i].maskC.any('Z')).plot(
ax=axb,cmap=cmap_deep,add_colorbar=False)
m['C'].cumsum(dim=rm_dim[0]+'C').where(m['C']).plot(ax=axb,cmap='Greys',add_colorbar=False)
axb.set(title=f'',ylabel='',xlabel='')
axs.append(axb)
return fig,axs
def plot_zlev_with_max(xda,use_mask=True,ax=None,xr_kwargs={}):
"""Make a 2D plot at the vertical level where data array
has it's largest value in amplitude
Parameters
----------
xda : xarray DataArray
with the field to be plotted, function of (Z,Y,X)
use_mask : bool, optional
mask the field
ax : matplotlib axis object, optional
current plotting axis
xr_kwargs : dict, optional
additional arguments for xarray plotting method
Returns
-------
z : float
height of zlevel at maximum
"""
def _make_float(xarr):
"""useful for putting x,y,z of max val in plot title"""
if len(xarr)>1:
warn(f'{xarr.name} has more than one max location, picking first...')
xarr=xarr[0]
return float(xarr.values)
xda_max = np.abs(xda).max()
x,y,mask = _get_coords_and_mask(xda.coords)
# get X, Y, Z of max value
xda_maxloc = xda.where(xda==xda_max,drop=True)
if len(xda_maxloc)==0:
xda_maxloc = xda.where(xda==-xda_max,drop=True)
xsel = _make_float(xda_maxloc[x])
ysel = _make_float(xda_maxloc[y])
zsel = _make_float(xda_maxloc['Z'])
# grab the zlev
xda = xda.sel(Z=zsel)
# mask?
if use_mask:
xda = xda.where(mask.sel(Z=zsel))
if ax is not None:
xda.plot(ax=ax,**xr_kwargs)
ax.set_title(f'max loc (x,y,z) = ({xsel:.2f},{ysel:.2f},{zsel:.2f})')
else:
xda.plot(**xr_kwargs)
plt.title(f'max loc (x,y,z) = ({xsel:.2f},{ysel:.2f},{zsel:.2f})')
return zsel
def horizontal_map(x,y,fld1,fld2=None,
title1=None,title2=None,
depth=None,log_data=False,
mask1=None,mask2=None,
ncolors=None,
c_lim=None,c_lim1=None,c_lim2=None,
cmap=None,cmap1=None,cmap2=None):
"""
Make a figure with plots of fld1 and fld2 over x,y next to e/o
Parameters
----------
x,y: Grid information, giving lat/lon coordinates
fld1/2: 2D field as numpy array or xarray DataArray
fld2 optional, otherwise generate single figure
Optional Parameters
-------------------
title1/2: string for title above figure
depth: depth field as an xarray DataArray to be used as
plt.contour(depth.XC,depth.YC,depth.Depth)
log_data: plot log_10(fld)
mask1/2: mask field to with given mask array
ncolors: Number of colors for colormap
c_lim: two element array with colorbar limits
c_lim1/2: different colorbar limits for each plot
c_lim is used for both, c_lim1/2 are for left or right plot
cmap: string or colormap object
default for sequential data is 'YlGnBu_r'
default for diverging data is 'BuBG_r'
cmap1/2: similar logic for c_lim, c_lim1/2.
cmap is global, cmap1/2 are for individual plots
Returns
-------
fig : matplotlib.figure.Figure object
"""
# Test for c_lim or c_lim1/2
if c_lim is not None and (c_lim1 is not None or c_lim2 is not None):
raise ValueError('Can only provide c_lim or c_lim1/2, not all three')
if cmap is not None and (cmap1 is not None or cmap2 is not None):
raise ValueError('Can only provide cmap or cmap1/2, not all three')
if c_lim is not None:
c_lim1 = c_lim
c_lim2 = c_lim
if cmap is not None:
cmap1 = cmap
cmap2 = cmap
fig = plt.figure(figsize=(15,6))
plt.subplot(1,2,1)
_single_horizontal_map(x,y,fld1,title1,depth,log_data,mask1,ncolors,c_lim1,cmap1)
if fld2 is not None:
plt.subplot(1,2,2)
_single_horizontal_map(x,y,fld2,title2,depth,log_data,mask2,ncolors,c_lim2,cmap2)
plt.show()
return fig
def depth_slice(x,z,fld1,fld2=None,
title1=None,title2=None,
depth=None,log_data=False,
mask1=None,mask2=None,
ncolors=None,
c_lim=None,c_lim1=None,c_lim2=None,
cmap=None,cmap1=None,cmap2=None):
"""
Make a slice through depth with plots of fld1 and fld2 and depth on y axis next to e/o
Parameters
----------
x,z: Grid information, x is some generic coordinate, z is depth
fld1/2: 2D field as numpy array or xarray DataArray
fld2 optional, otherwise generate single figure
Optional Parameters
-------------------
title1/2: string for title above figure
depth: depth field as an xarray DataArray to be used as
plt.contour(depth.XC,depth.YC,depth.Depth)
log_data: plot log_10(fld)
mask1/2: mask field to with given mask array
ncolors: Number of colors for colormap
c_lim: two element array with colorbar limits
c_lim1/2: different colorbar limits for each plot
c_lim is used for both, c_lim1/2 are for left or right plot
cmap: string or colormap object
default for sequential data is 'YlGnBu_r'
default for diverging data is 'BuBG_r'
cmap1/2: similar logic for c_lim, c_lim1/2.
cmap is global, cmap1/2 are for individual plots
Returns
-------
fig : matplotlib.figure.Figure object
"""
# Test for c_lim or c_lim1/2
if c_lim is not None and (c_lim1 is not None or c_lim2 is not None):
raise ValueError('Can only provide c_lim or c_lim1/2, not all three')
if cmap is not None and (cmap1 is not None or cmap2 is not None):
raise ValueError('Can only provide cmap or cmap1/2, not all three')
if c_lim is not None:
c_lim1 = c_lim
c_lim2 = c_lim
if cmap is not None:
cmap1 = cmap
cmap2 = cmap
fig = plt.figure(figsize=(15,6))
plt.subplot(1,2,1)
_single_depth_slice(x,z,fld1,title1,depth,log_data,mask1,ncolors,c_lim1,cmap1)
if fld2 is not None:
plt.subplot(1,2,2)
_single_depth_slice(x,z,fld2,title2,depth,log_data,mask2,ncolors,c_lim2,cmap2)
plt.show()
return fig
def _single_horizontal_map(x,y,fld,titleStr,depth,log_data,mask,ncolors,c_lim,cmap):
"""
Non-user facing function to distill horizontal data to numpy array for plotting
"""
if isinstance(fld, np.ndarray):
if len(np.shape(fld))==2:
fld_values = fld
fld_name = ''
elif len(np.shape(fld))==3:
print('Warning: input fld is 3D, taking fld[0,:,:]')
fld_values = fld[0,:,:]
fld_name = ''
else:
            raise TypeError("Input field is >3D and I don't want to guess the 2 dims to grab")
else:
# Assume xarray DataArray
if 'time' in fld.dims:
print('Warning: Time dimension present, grabbing first record')
fld=fld.isel(time=0)
if 'Z' in fld.dims:
print('Warning: Z dimension present, grabbing top layer')
fld=fld.isel(Z=0)
fld_values = fld.values
fld_name = fld.name
# If desired, mask the field
# Note: do this before getting cbar limits
if mask is not None:
if not isinstance(fld,np.ndarray):
#Assume xarray DataArray
mask = mask.values
mask = np.where(mask==0,np.NAN,1)
fld_values = fld_values * mask
_nice_plot(x,y,fld_values,titleStr,depth,log_data,mask,ncolors,c_lim,cmap)
def _single_depth_slice(x,z,fld,titleStr,depth,log_data,mask,ncolors,c_lim,cmap):
"""
Non-user facing function to distill depth slice data to numpy array for plotting
"""
if isinstance(fld, np.ndarray):
if len(np.shape(fld))==2:
fld_values = fld
fld_name = ''
elif len(np.shape(fld))==3:
print('Warning: input fld is 3D, taking fld[0,:,:]')
fld_values = fld[0,:,:]
fld_name = ''
else:
            raise TypeError("Input field is >3D and I don't want to guess the 2 dims to grab")
else:
# Assume xarray DataArray
if 'time' in fld.dims:
print('Warning: Time dimension present, grabbing first record')
fld=fld.isel(time=0)
# Can't do this for other dimensions because who knows what they will be
fld_values = fld.values
fld_name = fld.name
# If desired, mask the field
# Note: do this before getting cbar limits
if mask is not None:
if not isinstance(fld,np.ndarray):
#Assume xarray DataArray
mask = mask.values
mask = np.where(mask==0,np.NAN,1)
fld_values = fld_values * mask
_nice_plot(x,z,fld_values,titleStr,depth,log_data,mask,ncolors,c_lim,cmap)
def _nice_plot(x,y,fld_values,titleStr,depth,log_data,mask,ncolors,c_lim,cmap):
"""
Generic plotting routine for pcolormesh
"""
# If desired, take log_10 of data
if log_data:
fld_values = np.where(fld_values==0,np.NAN,fld_values)
fld_values = np.log10(fld_values)
# Set colorbar limits
    fld_max = np.nanmax(fld_values)
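    # (assumed completion -- the source truncates here; this follows the
    # docstring defaults: 'YlGnBu_r' for sequential data, a diverging map
    # ('BrBG_r', assuming the docstring's 'BuBG_r' is a typo) with symmetric
    # limits for fields that change sign)
    fld_min = np.nanmin(fld_values)
    if c_lim is None:
        if fld_min < 0 < fld_max:
            biggest = np.nanmax(np.abs(fld_values))
            c_lim = [-biggest, biggest]
        else:
            c_lim = [fld_min, fld_max]
    if cmap is None:
        cmap = 'BrBG_r' if c_lim[0] < 0 else 'YlGnBu_r'
    if ncolors is not None:
        cmap = plt.get_cmap(cmap, ncolors)
    plt.pcolormesh(x, y, fld_values, vmin=c_lim[0], vmax=c_lim[1], cmap=cmap)
    plt.colorbar()
    if depth is not None:
        plt.contour(depth.XC, depth.YC, depth.Depth, colors='k', linewidths=0.5)
    if titleStr is not None:
        plt.title(titleStr)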
import numpy as np
def get_epipole(F):
'''
Epipole is the eigenvector associated with smallest eigenvalue of F
'''
evalue, evector = np.linalg.eig(F) # normalized evector
index = np.argmin(evalue)
epipole = evector[:, index]
return epipole
def get_rotation_axis(d):
# d_i to make image plane parallel
# intersection line
axis = np.array([-d[1], d[0], 0])
return axis
def get_angle(epipole, axis):
return np.arctan(epipole[2] / (axis[1] * epipole[0] - axis[0] * epipole[1]))
def get_plane_rotation_matrix(axis, angle):
cos_angle = np.cos(angle)
sin_angle = np.sin(angle)
dx = axis[0]
dx2 = dx ** 2
dy = axis[1]
dy2 = dy ** 2
return np.array([[dx2 + (1 - dx2) * cos_angle, dx * dy * (1 - cos_angle), dy * sin_angle],
[dx * dy * (1 - cos_angle), dy2 + (1 - dy2) * cos_angle, -dx * sin_angle],
[-dy * sin_angle, dx * sin_angle, cos_angle]])
def get_scanline_rotation_matrix(angle):
    cos = np.cos(angle)
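    sin = np.sin(angle)
    # (assumed completion -- an in-plane rotation about the optical (z) axis,
    # which is what aligning epipolar lines with scanlines requires)
    return np.array([[cos, -sin, 0],
                     [sin, cos, 0],
                     [0, 0, 1]])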
"""Auditory Filterbanks and scales for Speech and Audio Analysis.
The Gammatone filterbank is a direct translation of Dan Ellis'
Gammatone-like spectrograms package [1], which is itself partly a
translation of Malcolm Slaney's Auditory Toolbox [2].
References:
[1]: https://labrosa.ee.columbia.edu/matlab/gammatonegram/
[2]: https://engineering.purdue.edu/~malcolm/interval/1998-010/
"""
import numpy as np
from scipy import signal
from .util import fftfreqz, freqz
def dft2mel(nfft, sr=8000., nfilts=0, width=1., minfrq=0., maxfrq=4000.,
sphinx=False, constamp=True):
"""Map linear discrete frequencies to Mel scale."""
    if nfilts == 0:
        nfilts = int(np.ceil(hz2mel(np.array([maxfrq]), sphinx)[0]/2))
weights = np.zeros((nfilts, nfft))
# dft index -> linear frequency in hz
    dftfrqs = np.arange(nfft//2 + 1, dtype=float)/nfft * sr
maxmel, minmel = hz2mel(np.array([maxfrq, minfrq]), sphinx)
binfrqs = mel2hz(minmel+np.linspace(0., 1., nfilts+2)
* (maxmel-minmel), sphinx)
for i in range(nfilts):
fs = binfrqs[i:i+3].copy()
fs = fs[1] + width*(fs-fs[1]) # adjust bandwidth if needed
loslope = (dftfrqs - fs[0])/(fs[1] - fs[0])
hislope = (fs[2] - dftfrqs)/(fs[2] - fs[1])
        weights[i, 0:nfft//2+1] = np.maximum(0, np.minimum(loslope, hislope))
if constamp:
# Slaney-style mel is scaled to be approx constant E per channel
weights = np.diag(
2/(binfrqs[2:nfilts+2]-binfrqs[:nfilts])).dot(weights)
    weights[:, nfft//2+1:] = 0  # avoid aliasing
return weights, binfrqs[1:]
def hz2dft(freq, sr, nfft):
"""Map frequency in Hz to discrete Fourier transform bins.
Parameters
----------
freq: array_like
Frequency in hz
sr: int
Sampling rate in hz
nfft: int
Number of DFT bins in range [0, 2*pi)
Returns
-------
bins: array_like
Frequency bin numbers
"""
return (freq/sr * nfft).astype('int')
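# e.g. a 1 kHz tone sampled at 8 kHz with a 256-point DFT lands in bin 32:
#   hz2dft(np.array([1000.]), 8000, 256) -> array([32])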
def hz2mel(f, sphinx=True):
"""Convert linear frequency to mel frequency scale."""
if sphinx:
return 2595. * np.log10(1+f/700.)
# match Slaney's toolbox
f0, f_sp, brkfrq = 0., 200./3, 1000.
brkpt = (brkfrq - f0) / f_sp
logstep = np.exp(np.log(6.4)/27.)
z = np.empty_like(f)
lower = f < brkfrq # np.less(f,brkfrq)
higher = np.logical_not(lower)
z[lower] = (f[lower] - f0) / f_sp
z[higher] = brkpt + np.log(f[higher]/brkfrq) / np.log(logstep)
return z
def mel2hz(z, sphinx=True):
"""Convert Mel frequency to linear frequency scale."""
if sphinx:
return 700*(10**(z/2595.)-1)
f0, f_sp, brkfrq = 0., 200./3, 1000.
brkpt = (brkfrq - f0) / f_sp
logstep = np.exp(np.log(6.4)/27.)
f = np.empty_like(z)
lower = z < brkpt # np.less(z,brkpt)
higher = np.logical_not(lower)
f[lower] = f0 + z[lower] * f_sp
f[higher] = brkfrq * np.exp(np.log(logstep)*(z[higher]-brkpt))
return f
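# Sanity check (illustrative): the two conversions should round-trip, e.g.
#   np.allclose(mel2hz(hz2mel(np.array([440.]))), np.array([440.]))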
# ERB-related Functions starting below
# Global Parameters
# Change the following three parameters if you wish to use a different
# ERB scale. Must change in MakeERBCoeffs too.
ERB_EAR_Q = 9.26449 # Glasberg and Moore Parameters
ERB_MIN_BW = 24.7
ERB_ORDER = 1
# Process an input waveform with a gammatone filter bank. This function
# takes a single sound vector, and returns an array of filter outputs, one
# channel per row.
#
# The fcoefs parameter, which completely specifies the Gammatone filterbank,
# should be designed with the MakeERBFilters function. If it is omitted,
# the filter coefficients are computed for you assuming a 22050Hz sampling
# rate and 64 filters regularly spaced on an ERB scale from fs/2 down to 100Hz.
#
# Malcolm Slaney @ Interval, June 11, 1998.
# (c) 1998 Interval Research Corporation
# Thanks to Alain de Cheveigne' for his suggestions and improvements.
def erb_fbank(sig, A0, A11, A12, A13, A14, A2, B0, B1, B2, gain, cascade=True):
"""Filter a signal using ERB filterbanks."""
if cascade: # original implementation. Might be numerically more stable.
y1 = signal.lfilter([A0/gain, A11/gain, A2/gain], [B0, B1, B2], sig)
y2 = signal.lfilter([A0, A12, A2], [B0, B1, B2], y1)
y3 = signal.lfilter([A0, A13, A2], [B0, B1, B2], y2)
y = signal.lfilter([A0, A14, A2], [B0, B1, B2], y3)
return y
else: # merge the difference EQ above into one
b = np.convolve(np.convolve([A0, A11, A2], [A0, A12, A2]),
np.convolve([A0, A13, A2], [A0, A14, A2])) / gain
        a = np.convolve(np.convolve([B0, B1, B2], [B0, B1, B2]),
                        np.convolve([B0, B1, B2], [B0, B1, B2]))
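        # (assumed completion -- apply the merged transfer function once,
        # mirroring what the four cascaded lfilter calls above compute)
        return signal.lfilter(b, a, sig)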
# flake8: noqa
"""
github.com/mikedh/trimesh
----------------------------
Library for importing, exporting and doing simple operations on triangular meshes.
"""
from . import ray
from . import util
from . import units
from . import poses
from . import graph
from . import sample
from . import repair
from . import convex
from . import remesh
from . import bounds
from . import caching
from . import inertia
from . import nsphere
from . import boolean
from . import grouping
from . import geometry
from . import permutate
from . import proximity
from . import triangles
from . import collision
from . import curvature
from . import smoothing
from . import comparison
from . import registration
from . import decomposition
from . import intersections
from . import transformations
from .visual import create_visual
from .exchange.export import export_mesh
from .constants import log, log_time, tol
from .scene import Scene
from .parent import Geometry
import copy
import numpy as np
class Trimesh(Geometry):
def __init__(self,
vertices=None,
faces=None,
face_normals=None,
vertex_normals=None,
face_colors=None,
vertex_colors=None,
face_attributes=None,
vertex_attributes=None,
metadata=None,
process=True,
validate=False,
use_embree=True,
initial_cache=None,
visual=None,
**kwargs):
"""
A Trimesh object contains a triangular 3D mesh.
Parameters
------------
vertices : (n, 3) float
Array of vertex locations
faces : (m, 3) or (m, 4) int
Array of triangular or quad faces (triangulated on load)
face_normals : (m, 3) float
Array of normal vectors corresponding to faces
vertex_normals : (n, 3) float
Array of normal vectors for vertices
metadata : dict
Any metadata about the mesh
process : bool
if True, Nan and Inf values will be removed
immediately and vertices will be merged
validate : bool
If True, degenerate and duplicate faces will be
removed immediately, and some functions will alter
the mesh to ensure consistent results.
use_embree : bool
If True try to use pyembree raytracer.
If pyembree is not available it will automatically fall
back to a much slower rtree/numpy implementation
initial_cache : dict
A way to pass things to the cache in case expensive
things were calculated before creating the mesh object.
visual : ColorVisuals or TextureVisuals
Assigned to self.visual
"""
if initial_cache is None:
initial_cache = {}
# self._data stores information about the mesh which
# CANNOT be regenerated.
# in the base class all that is stored here is vertex and
# face information
# any data put into the store is converted to a TrackedArray
# which is a subclass of np.ndarray that provides md5 and crc
# methods which can be used to detect changes in the array.
self._data = caching.DataStore()
# self._cache stores information about the mesh which CAN be
# regenerated from self._data, but may be slow to calculate.
# In order to maintain consistency
# the cache is cleared when self._data.crc() changes
self._cache = caching.Cache(
id_function=self._data.fast_hash,
force_immutable=True)
self._cache.update(initial_cache)
# check for None only to avoid warning messages in subclasses
if vertices is not None:
# (n, 3) float, set of vertices
self.vertices = vertices
if faces is not None:
# (m, 3) int of triangle faces, references self.vertices
self.faces = faces
# hold visual information about the mesh (vertex and face colors)
if visual is None:
self.visual = create_visual(
face_colors=face_colors,
vertex_colors=vertex_colors,
mesh=self)
else:
self.visual = visual
# normals are accessed through setters/properties and are regenerated
# if dimensions are inconsistent, but can be set by the constructor
# to avoid a substantial number of cross products
if face_normals is not None:
self.face_normals = face_normals
# (n, 3) float of vertex normals, can be created from face normals
if vertex_normals is not None:
self.vertex_normals = vertex_normals
# embree is a much, much faster raytracer written by Intel
# if you have pyembree installed you should use it
# although both raytracers were designed to have a common API
if ray.has_embree and use_embree:
self.ray = ray.ray_pyembree.RayMeshIntersector(self)
else:
# create a ray-mesh query object for the current mesh
# initializing is very inexpensive and object is convenient to have.
# On first query expensive bookkeeping is done (creation of r-tree),
# and is cached for subsequent queries
self.ray = ray.ray_triangle.RayMeshIntersector(self)
# a quick way to get permuted versions of the current mesh
self.permutate = permutate.Permutator(self)
# convenience class for nearest point queries
self.nearest = proximity.ProximityQuery(self)
# store metadata about the mesh in a dictionary
self.metadata = dict()
# update the mesh metadata with passed metadata
if isinstance(metadata, dict):
self.metadata.update(metadata)
elif metadata is not None:
raise ValueError(
'metadata should be a dict or None, got %s' % str(metadata))
# Set the default center of mass and density
self._density = 1.0
self._center_mass = None
# store per-face and per-vertex attributes which will
# be updated when an update_faces call is made
self.face_attributes = {}
self.vertex_attributes = {}
# use update to copy items
if face_attributes is not None:
self.face_attributes.update(face_attributes)
if vertex_attributes is not None:
self.vertex_attributes.update(vertex_attributes)
# process will remove NaN and Inf values and merge vertices
# if validate, will remove degenerate and duplicate faces
if process or validate:
self.process(validate=validate, **kwargs)
# save reference to kwargs
self._kwargs = kwargs
def process(self, validate=False, **kwargs):
"""
Do processing to make a mesh useful.
Does this by:
1) removing NaN and Inf values
2) merging duplicate vertices
If validate:
3) Remove triangles which have one edge of their rectangular 2D
oriented bounding box shorter than tol.merge
4) remove duplicated triangles
5) ensure triangles are consistently wound
and normals face outwards
Parameters
------------
validate : bool
If True, remove degenerate and duplicate faces
Returns
------------
self: trimesh.Trimesh
Current mesh
"""
# if there are no vertices or faces exit early
if self.is_empty:
return self
# avoid clearing the cache during operations
with self._cache:
self.remove_infinite_values()
self.merge_vertices(**kwargs)
# if we're cleaning remove duplicate
# and degenerate faces
if validate:
self.remove_duplicate_faces()
self.remove_degenerate_faces()
self.fix_normals()
# since none of our process operations moved vertices or faces
# we can keep face and vertex normals in the cache without recomputing
# if faces or vertices have been removed, normals are validated before
# being returned so there is no danger of inconsistent dimensions
self._cache.clear(exclude=['face_normals',
'vertex_normals'])
self.metadata['processed'] = True
return self
def md5(self):
"""
An MD5 of the core geometry information for the mesh,
faces and vertices.
        Generated from TrackedArray, which subclasses np.ndarray to
        monitor the array for changes and return a correct, lazily
        evaluated MD5, so the hash only has to be recalculated when
        the data changes rather than on every call.
Returns
----------
md5 : string
MD5 of everything in the DataStore
"""
md5 = self._data.md5()
return md5
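    # A hedged sketch of the change tracking (`mesh` is an assumption):
    #   before = mesh.md5()
    #   mesh.vertices[0] += 1.0      # mutate core geometry
    #   assert mesh.md5() != before  # hash reflects the change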
def crc(self):
"""
A zlib.adler32 checksum for the current mesh data.
This is about 5x faster than an MD5, and the checksum is
checked every time something is requested from the cache so
it gets called a lot.
Returns
----------
crc : int
Checksum of current mesh data
"""
return self._data.fast_hash()
@property
def faces(self):
"""
The faces of the mesh.
This is regarded as core information which cannot be regenerated from
cache, and as such is stored in self._data which tracks the array for
changes and clears cached values of the mesh if this is altered.
Returns
----------
faces : (n, 3) int
Representing triangles which reference self.vertices
"""
return self._data.get('faces', np.empty(shape=(0, 3), dtype=np.int64))
@faces.setter
def faces(self, values):
"""
Set the vertex indexes that make up triangular faces.
Parameters
--------------
values : (n, 3) int
Indexes of self.vertices
"""
if values is None or len(values) == 0:
if 'faces' in self._data:
del self._data['faces']
return
        values = np.asanyarray(values, dtype=np.int64)
import cv2
import os
import numpy as np
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # adjust the rotation matrix to take the translation into account
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH))
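# A hedged usage sketch; the file names and angle are assumptions:
#   img = cv2.imread('input.png')
#   rotated = rotate_bound(img, 45)  # rotate 45 deg clockwise, no cropping
#   cv2.imwrite('rotated.png', rotated)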
# Adapted for numpy/ma/cdms2 by convertcdms.py
import MV2
import cdms2
import genutil
import unidata
import vcs
import numpy
from vcs import VCS_validation_functions
thermo_objects = []
def Es(T, method=None):
"""Computes saturated pressure in Pa given T in K, using the method:
1: Hyland-Wexler formulation, polynomial coeff (absolute norm)
2: Wexler formulation
3: Hyland-Wexler formulation, polynomial coeff (relative error norm)
4: classic Goff Gratch equation
5: 6.112*numpy.ma.exp(17.67*tempc/(tempc+243.5))
Default is method 1
Note: 1 and 2 use method 3 where T is not : 173.15 < T < 473.15
ref for 1, 2 and 3:
<NAME>., Journal of Applied Met., Vol 31, Dec 1992
( http://ams.allenpress.com/perlserv/?request=get-document&\
doi=10.1175%2F1520-0450(1992)031%3C1507%3APFTSVP%3E2.0.CO%3B2&ct=1 )
"""
if method is None:
method = 1
if method == 1:
# Put in C
x = T - 273.15
# Water vapor
c0 = 0.611220713E03
c1 = 0.443944344E02
c2 = 0.143195336E01
c3 = 0.263350515E-01
c4 = 0.310636053E-03
c5 = 0.185218710E-05
c6 = 0.103440324E-07
c7 = -0.468258100E-10
c8 = 0.466533033E-13
eswat = c0 + x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * (c5 + x * (c6 +
x * (c7 + x * c8)))))))
# ice
c0 = .611153246E03
c1 = .503261230E02
c2 = .188595709E01
c3 = .422115970E-01
c4 = .620376691E-03
c5 = .616082536E-05
c6 = .405172828E-07
c7 = .161492905E-09
c8 = .297886454E-12
esice = c0 + x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * (c5 + x * (c6 +
x * (c7 + x * c8)))))))
# Combine
es = MV2.where(MV2.less(T, 273.15), esice, eswat)
# Overwrite values outside valid range with method 2
mn, mx = genutil.minmax(T)
if mn < 173.16 or mx > 473.15:
es2 = Es(T, method=2)
es = MV2.where(MV2.less(T, 173.16), es2, es)
es = MV2.where(MV2.greater(T, 473.15), es2, es)
elif method == 2:
# over water
g0 = -0.29912729E4
g1 = -0.60170128E4
g2 = 0.1887643854E2
g3 = -0.28354721E-1
g4 = 0.17838301E-4
g5 = -0.84150417E-9
g6 = 0.44412543E-12
g7 = 0.2858487E1
# over ice
k0 = -0.58653696e4
k1 = 0.2224103300E2
k2 = 0.13749042E-1
k3 = -0.34031775E-4
k4 = 0.26967687E-7
k5 = 0.6918651
esice = (k0 + (k1 + k5 * MV2.log(T) + (k2 + (
k3 + k4 * T) * T) * T) * T) / T # over ice
eswat = (g0 + (g1 + (g2 + g7 * MV2.log(T) + (g3 + (g4 + (
g5 + g6 * T) * T) * T) * T) * T) * T) / T ** 2 # over water
es = MV2.where(MV2.less(T, 273.15), esice, eswat)
es = MV2.exp(es)
elif method == 3:
        # Convert T to degrees Celsius
x = T - 273.15
# Water vapor
c0 = 0.611213476E03
c1 = 0.444007856E02
c2 = 0.143064234E01
c3 = 0.264461437E-01
c4 = 0.305930558E-03
c5 = 0.196237241E-05
c6 = 0.892344772E-08
c7 = -0.373208410E-10
c8 = 0.209339997E-13
eswat = c0 + x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * (
c5 + x * (c6 + x * (c7 + x * c8)))))))
# ice
c0 = .611123516E03
c1 = .503109514E02
c2 = .1888369801E01
c3 = .420547422E-01
c4 = .614396778E-03
c5 = .602780717E-05
c6 = .387940929E-07
c7 = .149436277E-09
c8 = .262655803E-12
esice = c0 + x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * (
c5 + x * (c6 + x * (c7 + x * c8)))))))
# Combine
es = MV2.where(MV2.less(T, 273.15), esice, eswat)
# Overwrite values outside valid range with method 2
mn, mx = genutil.minmax(T)
if mn < 173.16 or mx > 473.15:
es2 = Es(T, method=2)
es = MV2.where(MV2.less(T, 173.16), es2, es)
es = MV2.where(MV2.greater(T, 473.15), es2, es)
elif method == 4:
est = 101324.6 # Pa
Ts = 373.16 / T
a = -7.90298
b = 5.02808
c = -1.3816E-7
d = 11.344
f = 8.1328E-3
h = -3.49149
maxexp = int(numpy.log10(numpy.finfo(numpy.float).max))
minexp = 1 - a
es = a * (Ts - 1.)
es = es + b * numpy.ma.log10(Ts)
A = d * (1. - Ts)
A = numpy.ma.masked_greater(A, maxexp)
A = numpy.ma.masked_less(A, minexp)
        es = es + c * (numpy.ma.power(10, A) - 1.)
from __future__ import print_function
import copy
import os
import sys
import time
import unittest
from nose.plugins.skip import SkipTest
from nose.tools import assert_raises
import numpy
from six.moves import xrange
import theano
from theano import tensor, config
from theano.sandbox import rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.sandbox.cuda import cuda_available
from theano.tests import unittest_tools as utt
from theano.tests.unittest_tools import attr
if cuda_available:
from theano.sandbox.cuda import float32_shared_constructor
# TODO: test gpu
# Done in test_consistency_GPU_{serial,parallel}
# TODO: test MRG_RandomStreams
# Partly done in test_consistency_randomstreams
# TODO: test optimizer mrg_random_make_inplace
# TODO: make tests work when no flags given. Now need:
# THEANO_FLAGS=device=gpu0,floatX=float32
# Partly done, in test_consistency_GPU_{serial,parallel}
mode = config.mode
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
utt.seed_rng()
# Results generated by Java code using L'Ecuyer et al.'s code, with:
# main seed: [12345]*6 (default)
# 12 streams
# 7 substreams for each stream
# 5 samples drawn from each substream
java_samples = numpy.loadtxt(os.path.join(os.path.split(theano.__file__)[0],
'sandbox',
'samples_MRG31k3p_12_7_5.txt'))
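# A hedged note on the reference layout (assuming one value per line):
# 12 streams * 7 substreams * 5 samples = 420 values, grouped by stream,
# then by substream, with each substream's 5 draws contiguous:
#   assert java_samples.size == 420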
def test_deterministic():
seed = utt.fetch_seed()
sample_size = (10, 20)
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
R = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u = R.uniform(size=sample_size)
f = theano.function([], u)
fsample1 = f()
fsample2 = f()
assert not numpy.allclose(fsample1, fsample2)
R2 = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
u2 = R2.uniform(size=sample_size)
g = theano.function([], u2)
gsample1 = g()
gsample2 = g()
assert numpy.allclose(fsample1, gsample1)
assert numpy.allclose(fsample2, gsample2)
def test_consistency_randomstreams():
"""
Verify that the random numbers generated by MRG_RandomStreams
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
for use_cuda in test_use_cuda:
# print 'use_cuda =', use_cuda
samples = []
rng = MRG_RandomStreams(seed=seed, use_cuda=use_cuda)
for i in range(n_streams):
stream_samples = []
u = rng.uniform(size=(n_substreams,), nstreams=n_substreams)
f = theano.function([], u)
for j in range(n_samples):
s = f()
stream_samples.append(s)
stream_samples = numpy.array(stream_samples)
stream_samples = stream_samples.T.flatten()
samples.append(stream_samples)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_serial():
"""
Verify that the random numbers generated by mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
rstate = theano.shared(numpy.array([stream_rstate.copy()],
dtype='int32'))
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX,
size=(1,))
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
rstate.default_update = new_rstate
f = theano.function([], sample)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_cpu_parallel():
"""
Verify that the random numbers generated by mrg_uniform, in parallel,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate)
rstate = theano.shared(rstate)
new_rstate, sample = rng_mrg.mrg_uniform.new(rstate, ndim=None,
dtype=config.floatX,
size=(n_substreams,))
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
rstate.default_update = new_rstate
f = theano.function([], sample)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPU_serial():
"""
Verify that the random numbers generated by GPU_mrg_uniform, serially,
are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
if config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
else:
mode = config.mode
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
substream_rstate = numpy.array(stream_rstate.copy(), dtype='int32')
# HACK - we transfer these int32 to the GPU memory as float32
# (reinterpret_cast)
tmp_float_buf = numpy.frombuffer(substream_rstate.data,
dtype='float32')
# Transfer to device
rstate = float32_shared_constructor(tmp_float_buf)
new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(1,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPU_parallel():
"""
Verify that the random numbers generated by GPU_mrg_uniform, in
parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
if config.mode == 'FAST_COMPILE':
mode = 'FAST_RUN'
else:
mode = config.mode
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate).flatten()
# HACK - transfer these int32 to the GPU memory as float32
# (reinterpret_cast)
tmp_float_buf = numpy.frombuffer(rstate.data, dtype='float32')
# Transfer to device
rstate = float32_shared_constructor(tmp_float_buf)
new_rstate, sample = rng_mrg.GPU_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(n_substreams,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_GPU_nstreams_limit():
"""
Verify that a ValueError is raised when n_streams
is greater than 2**20 on GPU. This is the value of
(NUM_VECTOR_OP_THREADS_PER_BLOCK * NUM_VECTOR_OP_BLOCKS).
"""
if not cuda_available:
raise SkipTest('Optional package cuda not available')
seed = 12345
R = MRG_RandomStreams(seed=seed, use_cuda=True)
def eval_uniform(size, nstreams):
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
else:
mode = copy.copy(theano.compile.get_default_mode())
mode.check_py_code = False
out = R.uniform(size=size, nstreams=nstreams, dtype='float32')
f = theano.function([], out, mode=mode)
return f()
eval_uniform((10,), 2**20)
assert_raises(ValueError, eval_uniform, (10,), 2**20 + 1)
def test_consistency_GPUA_serial():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, serially,
    are the same as the reference (Java) implementation by L'Ecuyer et al.
"""
from theano.sandbox.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_rstate = curr_rstate.copy()
for j in range(n_substreams):
substream_rstate = numpy.array([stream_rstate.copy()],
dtype='int32')
# Transfer to device
rstate = gpuarray_shared_constructor(substream_rstate)
new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate,
ndim=None,
dtype='float32',
size=(1,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
samples.append(s)
# next substream
stream_rstate = rng_mrg.ff_2p72(stream_rstate)
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def test_consistency_GPUA_parallel():
"""
Verify that the random numbers generated by GPUA_mrg_uniform, in
parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.
"""
from theano.sandbox.gpuarray.tests.test_basic_ops import \
mode_with_gpu as mode
from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
seed = 12345
n_samples = 5
n_streams = 12
n_substreams = 7 # 7 samples will be drawn in parallel
samples = []
curr_rstate = numpy.array([seed] * 6, dtype='int32')
for i in range(n_streams):
stream_samples = []
rstate = [curr_rstate.copy()]
for j in range(1, n_substreams):
rstate.append(rng_mrg.ff_2p72(rstate[-1]))
rstate = numpy.asarray(rstate)
rstate = gpuarray_shared_constructor(rstate)
new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate, ndim=None,
dtype='float32',
size=(n_substreams,))
rstate.default_update = new_rstate
# Not really necessary, just mimicking
# rng_mrg.MRG_RandomStreams' behavior
sample.rstate = rstate
sample.update = (rstate, new_rstate)
# We need the sample back in the main memory
cpu_sample = tensor.as_tensor_variable(sample)
f = theano.function([], cpu_sample, mode=mode)
for k in range(n_samples):
s = f()
stream_samples.append(s)
samples.append(numpy.array(stream_samples).T.flatten())
# next stream
curr_rstate = rng_mrg.ff_2p134(curr_rstate)
samples = numpy.array(samples).flatten()
assert(numpy.allclose(samples, java_samples))
def basictest(f, steps, sample_size, prefix="", allow_01=False, inputs=None,
target_avg=0.5, target_std=None, mean_rtol=0.01, std_tol=0.01):
if inputs is None:
inputs = []
dt = 0.0
avg_var = 0.0
for i in xrange(steps):
t0 = time.time()
ival = f(*inputs)
assert ival.shape == sample_size
dt += time.time() - t0
ival = numpy.asarray(ival)
if i == 0:
mean = numpy.array(ival, copy=True)
avg_var = numpy.mean((ival - target_avg) ** 2)
min_ = ival.min()
max_ = ival.max()
else:
alpha = 1.0 / (1 + i)
mean = alpha * ival + (1 - alpha) * mean
avg_var = (alpha * numpy.mean((ival - target_avg) ** 2) +
(1 - alpha) * avg_var)
min_ = min(min_, ival.min())
max_ = max(max_, ival.max())
if not allow_01:
assert min_ > 0
assert max_ < 1
    if hasattr(target_avg, 'shape'):  # check whether target_avg is an array
diff = numpy.mean(abs(mean - target_avg))
# print prefix, 'mean diff with mean', diff
assert numpy.all(diff < mean_rtol * (1 + abs(target_avg))), (
'bad mean? %s %s' % (mean, target_avg))
else:
# if target_avg is a scalar, then we can do the mean of
# `mean` to get something more precise
mean = numpy.mean(mean)
# print prefix, 'mean', mean
assert abs(mean - target_avg) < mean_rtol * (1 + abs(target_avg)), (
'bad mean? %f %f' % (mean, target_avg))
std = numpy.sqrt(avg_var)
# print prefix, 'var', avg_var
# print prefix, 'std', std
if target_std is not None:
assert abs(std - target_std) < std_tol * (1 + abs(target_std)), (
'bad std? %f %f %f' % (std, target_std, std_tol))
# print prefix, 'time', dt
# print prefix, 'elements', steps * sample_size[0] * sample_size[1]
# print prefix, 'samples/sec', steps * sample_size[0] * sample_size[1] / dt
# print prefix, 'min', min_, 'max', max_
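# A hedged usage sketch for basictest; the generator and sizes are
# assumptions (the std of U(0, 1) is sqrt(1 / 12)):
#   R = MRG_RandomStreams(1234)
#   f = theano.function([], R.uniform(size=(100, 100)))
#   basictest(f, steps=100, sample_size=(100, 100),
#             target_avg=0.5, target_std=numpy.sqrt(1 / 12.))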
def test_uniform():
# TODO: test param low, high
# TODO: test size=None
# TODO: test ndim!=size.ndim
# TODO: test bad seed
# TODO: test size=Var, with shape that change from call to call
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (10, 100)
steps = 50
else:
sample_size = (500, 50)
steps = int(1e3)
x = tensor.matrix()
for size, const_size, var_input, input in [
(sample_size, sample_size, [], []),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
# test empty size (scalar)
((), (), [], []),
]:
# TEST CPU IMPLEMENTATION
# The python and C implementation are tested with DebugMode
# print ''
# print 'ON CPU with size=(%s):' % str(size)
x = tensor.matrix()
R = MRG_RandomStreams(234, use_cuda=False)
# Note: we specify `nstreams` to avoid a warning.
# TODO Look for all occurrences of `guess_n_streams` and `30 * 256`
# for such situations: it would be better to instead filter the
# warning using the warning module.
u = R.uniform(size=size,
nstreams=rng_mrg.guess_n_streams(size, warn=False))
f = theano.function(var_input, u, mode=mode)
assert any([isinstance(node.op, theano.sandbox.rng_mrg.mrg_uniform)
for node in f.maker.fgraph.toposort()])
# theano.printing.debugprint(f)
cpu_out = f(*input)
# print 'CPU: random?[:10], random?[-10:]'
# print cpu_out[0, 0:10]
# print cpu_out[-1, -10:]
        # Increase the number of steps if size implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 100
else:
steps_ = steps
basictest(f, steps_, const_size, prefix='mrg cpu', inputs=input)
if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU with size=(%s):' % str(size)
R = MRG_RandomStreams(234, use_cuda=True)
u = R.uniform(size=size, dtype='float32',
nstreams=rng_mrg.guess_n_streams(size, warn=False))
# well, it's really that this test w GPU doesn't make sense otw
assert u.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(u),
borrow=True), mode=mode_with_gpu)
assert any([isinstance(node.op,
theano.sandbox.rng_mrg.GPU_mrg_uniform)
for node in f.maker.fgraph.toposort()])
# theano.printing.debugprint(f)
gpu_out = numpy.asarray(f(*input))
# print 'GPU: random?[:10], random?[-10:]'
# print gpu_out[0, 0:10]
# print gpu_out[-1, -10:]
basictest(f, steps_, const_size, prefix='mrg gpu', inputs=input)
numpy.testing.assert_array_almost_equal(cpu_out, gpu_out,
decimal=6)
# print ''
# print 'ON CPU w Numpy with size=(%s):' % str(size)
RR = theano.tensor.shared_randomstreams.RandomStreams(234)
uu = RR.uniform(size=size)
ff = theano.function(var_input, uu, mode=mode)
# It's not our problem if numpy generates 0 or 1
basictest(ff, steps_, const_size, prefix='numpy',
allow_01=True, inputs=input)
@attr('slow')
def test_binomial():
# TODO: test size=None, ndim=X
# TODO: test size=X, ndim!=X.ndim
# TODO: test random seed in legal value(!=0 and other)
# TODO: test sample_size not a multiple of guessed #streams
# TODO: test size=Var, with shape that change from call to call
# we test size in a tuple of int and a tensor.shape.
# we test the param p with int.
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (10, 50)
steps = 50
rtol = 0.02
else:
sample_size = (500, 50)
steps = int(1e3)
rtol = 0.01
x = tensor.matrix()
for mean in [0.1, 0.5]:
for size, const_size, var_input, input in [
(sample_size, sample_size, [], []),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)]),
# test empty size (scalar)
((), (), [], []),
]:
yield (t_binomial, mean, size, const_size, var_input, input,
steps, rtol)
def t_binomial(mean, size, const_size, var_input, input, steps, rtol):
R = MRG_RandomStreams(234, use_cuda=False)
u = R.binomial(size=size, p=mean)
f = theano.function(var_input, u, mode=mode)
out = f(*input)
    # Increase the number of steps if size implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 100
else:
steps_ = steps
basictest(f, steps_, const_size, prefix='mrg cpu',
inputs=input, allow_01=True,
target_avg=mean, mean_rtol=rtol)
if mode != 'FAST_COMPILE' and cuda_available:
R = MRG_RandomStreams(234, use_cuda=True)
u = R.binomial(size=size, p=mean, dtype='float32')
# well, it's really that this test w GPU doesn't make sense otw
assert u.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(u),
borrow=True), mode=mode_with_gpu)
gpu_out = numpy.asarray(f(*input))
basictest(f, steps_, const_size, prefix='mrg gpu',
inputs=input, allow_01=True,
target_avg=mean, mean_rtol=rtol)
numpy.testing.assert_array_almost_equal(out, gpu_out,
decimal=6)
RR = theano.tensor.shared_randomstreams.RandomStreams(234)
uu = RR.binomial(size=size, p=mean)
ff = theano.function(var_input, uu, mode=mode)
# It's not our problem if numpy generates 0 or 1
basictest(ff, steps_, const_size, prefix='numpy', allow_01=True,
inputs=input, target_avg=mean, mean_rtol=rtol)
@attr('slow')
def test_normal0():
steps = 50
std = 2.
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (25, 30)
default_rtol = .02
else:
sample_size = (999, 50)
default_rtol = .01
sample_size_odd = (sample_size[0], sample_size[1] - 1)
x = tensor.matrix()
for size, const_size, var_input, input, avg, rtol, std_tol in [
(sample_size, sample_size, [], [], -5., default_rtol, default_rtol),
(x.shape, sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)],
-5., default_rtol, default_rtol),
((x.shape[0], sample_size[1]), sample_size, [x],
[numpy.zeros(sample_size, dtype=config.floatX)],
-5., default_rtol, default_rtol),
# test odd value
(sample_size_odd, sample_size_odd, [], [], -5.,
default_rtol, default_rtol),
# test odd value
(x.shape, sample_size_odd, [x],
[numpy.zeros(sample_size_odd, dtype=config.floatX)],
-5., default_rtol, default_rtol),
(sample_size, sample_size, [], [],
numpy.arange(numpy.prod(sample_size),
dtype='float32').reshape(sample_size),
10. * std / numpy.sqrt(steps), default_rtol),
# test empty size (scalar)
((), (), [], [], -5., default_rtol, 0.02),
# test with few samples at the same time
((1,), (1,), [], [], -5., default_rtol, 0.02),
((2,), (2,), [], [], -5., default_rtol, 0.02),
((3,), (3,), [], [], -5., default_rtol, 0.02),
]:
# print ''
# print 'ON CPU:'
R = MRG_RandomStreams(234, use_cuda=False)
# Note: we specify `nstreams` to avoid a warning.
n = R.normal(size=size, avg=avg, std=std,
nstreams=rng_mrg.guess_n_streams(size, warn=False))
f = theano.function(var_input, n, mode=mode)
# theano.printing.debugprint(f)
out = f(*input)
# print 'random?[:10]\n', out[0, 0:10]
# Increase the number of steps if size implies only a few samples
if numpy.prod(const_size) < 10:
steps_ = steps * 50
else:
steps_ = steps
basictest(f, steps_, const_size, target_avg=avg, target_std=std,
prefix='mrg ', allow_01=True, inputs=input,
mean_rtol=rtol, std_tol=std_tol)
sys.stdout.flush()
if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU:'
R = MRG_RandomStreams(234, use_cuda=True)
n = R.normal(size=size, avg=avg, std=std, dtype='float32',
nstreams=rng_mrg.guess_n_streams(size, warn=False))
# well, it's really that this test w GPU doesn't make sense otw
assert n.dtype == 'float32'
f = theano.function(var_input, theano.Out(
theano.sandbox.cuda.basic_ops.gpu_from_host(n),
borrow=True), mode=mode_with_gpu)
# theano.printing.debugprint(f)
sys.stdout.flush()
gpu_out = numpy.asarray(f(*input))
# print 'random?[:10]\n', gpu_out[0, 0:10]
# print '----'
sys.stdout.flush()
basictest(f, steps_, const_size, target_avg=avg, target_std=std,
prefix='gpu mrg ', allow_01=True, inputs=input,
mean_rtol=rtol, std_tol=std_tol)
            # Need to allow some rounding error as there is float
            # computation that is done on the gpu vs cpu
assert numpy.allclose(out, gpu_out, rtol=5e-6, atol=5e-6)
# print ''
# print 'ON CPU w NUMPY:'
RR = theano.tensor.shared_randomstreams.RandomStreams(234)
nn = RR.normal(size=size, avg=avg, std=std)
ff = theano.function(var_input, nn)
basictest(ff, steps_, const_size, target_avg=avg, target_std=std,
prefix='numpy ', allow_01=True, inputs=input, mean_rtol=rtol)
def basic_multinomialtest(f, steps, sample_size, target_pvals, n_samples,
prefix="", mean_rtol=0.04):
dt = 0.0
avg_pvals = numpy.zeros(target_pvals.shape, dtype=config.floatX)
for i in xrange(steps):
t0 = time.time()
ival = f()
assert ival.shape == sample_size
assert numpy.all(numpy.sum(ival, axis=1) == n_samples)
dt += time.time() - t0
avg_pvals += ival
avg_pvals /= (steps * n_samples)
assert numpy.mean(abs(avg_pvals - target_pvals)) < mean_rtol
print('random?[:10]\n', numpy.asarray(f()[:10]))
print(prefix, 'mean', avg_pvals)
# < mean_rtol, 'bad mean? %s %s' % (str(avg_pvals), str(target_pvals))
print(numpy.mean(abs(avg_pvals - target_pvals)))
print(prefix, 'time', dt)
print(prefix, 'elements', steps * numpy.prod(target_pvals.shape))
print(prefix, 'samples/sec', steps * numpy.prod(target_pvals.shape) / dt)
def test_multinomial():
steps = 100
mode_ = mode
if mode == 'FAST_COMPILE':
mode_ = 'FAST_RUN'
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (49, 5)
else:
sample_size = (450, 6)
mode_ = theano.compile.mode.get_mode(mode_)
# print ''
# print 'ON CPU:'
pvals = numpy.asarray(numpy.random.uniform(size=sample_size))
pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)
R = MRG_RandomStreams(234, use_cuda=False)
# Note: we specify `nstreams` to avoid a warning.
m = R.multinomial(pvals=pvals, dtype=config.floatX, nstreams=30 * 256)
f = theano.function([], m, mode=mode_)
# theano.printing.debugprint(f)
out = f()
basic_multinomialtest(f, steps, sample_size, pvals, n_samples=1,
prefix='mrg ')
sys.stdout.flush()
if mode != 'FAST_COMPILE' and cuda_available:
# print ''
# print 'ON GPU:'
R = MRG_RandomStreams(234, use_cuda=True)
pvals = numpy.asarray(pvals, dtype='float32')
# We give the number of streams to avoid a warning.
n = R.multinomial(pvals=pvals, dtype='float32', nstreams=30 * 256)
# well, it's really that this test w GPU doesn't make sense otw
assert n.dtype == 'float32'
f = theano.function(
[],
theano.sandbox.cuda.basic_ops.gpu_from_host(n),
mode=mode_.including('gpu'))
# theano.printing.debugprint(f)
gpu_out = f()
sys.stdout.flush()
basic_multinomialtest(f, steps, sample_size, pvals, n_samples=1,
prefix='gpu mrg ')
numpy.testing.assert_array_almost_equal(out, gpu_out, decimal=6)
def test_multinomial_n_samples():
mode_ = mode
if mode == 'FAST_COMPILE':
mode_ = 'FAST_RUN'
if (mode in ['DEBUG_MODE', 'DebugMode', 'FAST_COMPILE'] or
mode == 'Mode' and config.linker in ['py']):
sample_size = (49, 5)
else:
sample_size = (450, 6)
mode_ = theano.compile.mode.get_mode(mode_)
pvals = numpy.asarray(numpy.random.uniform(size=sample_size))
pvals = numpy.apply_along_axis(lambda row: row / numpy.sum(row), 1, pvals)
R = MRG_RandomStreams(234, use_cuda=False)
for n_samples, steps in zip([5, 10, 100, 1000], [20, 10, 1, 1]):
m = R.multinomial(pvals=pvals, n=n_samples,
dtype=config.floatX, nstreams=30 * 256)
f = theano.function([], m, mode=mode_)
basic_multinomialtest(f, steps, sample_size, pvals,
n_samples, prefix='mrg ')
sys.stdout.flush()
if mode != 'FAST_COMPILE' and cuda_available:
R = MRG_RandomStreams(234, use_cuda=True)
pvals = numpy.asarray(pvals, dtype='float32')
n = R.multinomial(pvals=pvals, n=n_samples,
dtype='float32', nstreams=30 * 256)
assert n.dtype == 'float32'
f = theano.function(
[],
theano.sandbox.cuda.basic_ops.gpu_from_host(n),
mode=mode_.including('gpu'))
sys.stdout.flush()
basic_multinomialtest(f, steps, sample_size, pvals,
n_samples, prefix='gpu mrg ')
class T_MRG(unittest.TestCase):
def test_bad_size(self):
R = MRG_RandomStreams(234, use_cuda=False)
for size in [
(0, 100),
(-1, 100),
(1, 0),
]:
self.assertRaises(ValueError, R.uniform, size)
self.assertRaises(ValueError, R.binomial, size)
self.assertRaises(ValueError, R.multinomial, size, 1, [])
self.assertRaises(ValueError, R.normal, size)
def test_multiple_rng_aliasing():
"""
Test that when we have multiple random number generators, we do not alias
the state_updates member. `state_updates` can be useful when attempting to
copy the (random) state between two similar theano graphs. The test is
meant to detect a previous bug where state_updates was initialized as a
    class attribute instead of in the __init__ function.
"""
rng1 = MRG_RandomStreams(1234)
rng2 = MRG_RandomStreams(2392)
assert rng1.state_updates is not rng2.state_updates
def test_random_state_transfer():
"""
Test that random state can be transferred from one theano graph to another.
"""
class Graph:
def __init__(self, seed=123):
self.rng = MRG_RandomStreams(seed)
self.y = self.rng.uniform(size=(1,))
g1 = Graph(seed=123)
f1 = theano.function([], g1.y)
g2 = Graph(seed=987)
f2 = theano.function([], g2.y)
g2.rng.rstate = g1.rng.rstate
for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):
su2[0].set_value(su1[0].get_value())
numpy.testing.assert_array_almost_equal(f1(), f2(), decimal=6)
def test_gradient_scan():
# Test for a crash when using MRG inside scan and taking the gradient
# See https://groups.google.com/d/msg/theano-dev/UbcYyU5m-M8/UO9UgXqnQP0J
theano_rng = MRG_RandomStreams(10)
w = theano.shared(numpy.ones(1, dtype='float32'))
def one_step(x):
return x + theano_rng.uniform((1,), dtype='float32') * w
x = tensor.vector(dtype='float32')
values, updates = theano.scan(one_step, outputs_info=x, n_steps=10)
gw = theano.grad(tensor.sum(values[-1]), w)
f = theano.function([x], gw)
f(numpy.arange(1, dtype='float32'))
def test_multMatVect():
A1 = tensor.lmatrix('A1')
s1 = tensor.ivector('s1')
m1 = tensor.iscalar('m1')
A2 = tensor.lmatrix('A2')
s2 = tensor.ivector('s2')
m2 = tensor.iscalar('m2')
g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)
i32max = numpy.iinfo(numpy.int32).max
A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
s1 = numpy.random.randint(0, i32max, 3).astype('int32')
m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
s2 = numpy.random.randint(0, i32max, 3).astype('int32')
m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
f0.input_storage[0].storage[0] = A1
f0.input_storage[1].storage[0] = s1
f0.input_storage[2].storage[0] = m1
f0.input_storage[3].storage[0] = A2
f0.input_storage[4].storage[0] = s2
f0.input_storage[5].storage[0] = m2
r_a1 = rng_mrg.matVecModM(A1, s1, m1)
r_a2 = rng_mrg.matVecModM(A2, s2, m2)
f0.fn()
r_b = f0.output_storage[0].value
assert numpy.allclose(r_a1, r_b[:3])
assert numpy.allclose(r_a2, r_b[3:])
def test_seed_fn():
test_use_cuda = [False]
if cuda_available:
test_use_cuda.append(True)
idx = tensor.ivector()
for use_cuda in test_use_cuda:
if config.mode == 'FAST_COMPILE' and use_cuda:
mode = 'FAST_RUN'
else:
mode = config.mode
for new_seed, same in [(234, True), (None, True), (23, False)]:
random = MRG_RandomStreams(234, use_cuda=use_cuda)
fn1 = theano.function([], random.uniform((2, 2), dtype='float32'),
mode=mode)
fn2 = theano.function([], random.uniform((3, 3), nstreams=2,
dtype='float32'),
mode=mode)
fn3 = theano.function([idx],
random.uniform(idx, nstreams=3, ndim=1,
dtype='float32'),
mode=mode)
fn1_val0 = fn1()
fn1_val1 = fn1()
assert not numpy.allclose(fn1_val0, fn1_val1)
fn2_val0 = fn2()
fn2_val1 = fn2()
assert not numpy.allclose(fn2_val0, fn2_val1)
fn3_val0 = fn3([4])
fn3_val1 = fn3([4])
assert not numpy.allclose(fn3_val0, fn3_val1)
assert fn1_val0.size == 4
assert fn2_val0.size == 9
random.seed(new_seed)
fn1_val2 = fn1()
fn1_val3 = fn1()
fn2_val2 = fn2()
fn2_val3 = fn2()
fn3_val2 = fn3([4])
fn3_val3 = fn3([4])
assert numpy.allclose(fn1_val0, fn1_val2) == same
assert numpy.allclose(fn1_val1, fn1_val3) == same
assert numpy.allclose(fn2_val0, fn2_val2) == same
assert numpy.allclose(fn2_val1, fn2_val3) == same
assert numpy.allclose(fn3_val0, fn3_val2) == same
assert numpy.allclose(fn3_val1, fn3_val3) == same
if __name__ == "__main__":
    rng = MRG_RandomStreams(numpy.random.randint(2147462579))
from .label_model import ClassConditionalLabelModel, LearningConfig, init_random
import numpy as np
from scipy import sparse
import torch
from torch import nn
class NaiveBayes(ClassConditionalLabelModel):
"""A generative label model that assumes that all labeling functions are
conditionally independent given the true class label, i.e., the naive Bayes
assumption.
    Proposed in: A. P. Dawid and A. M. Skene. Maximum likelihood
    estimation of observer error-rates using the EM algorithm.
    Journal of the Royal Statistical Society C, 28(1):20–28, 1979.
    Proposed for labeling functions in: A. Ratner, C. De Sa, S. Wu, D. Selsam,
    and C. Ré. Data programming: Creating large training sets, quickly. In
    Neural Information Processing Systems, 2016.
"""
def __init__(self, num_classes, num_lfs, init_acc=.9, acc_prior=0.025,
balance_prior=0.025, learn_class_balance=True):
"""Constructor.
Initializes labeling function accuracies using optional argument and all
other model parameters uniformly.
:param num_classes: number of target classes, i.e., binary
classification = 2
:param num_lfs: number of labeling functions to model
:param init_acc: initial estimated labeling function accuracy, must
be a float in [0,1]
        :param acc_prior: strength of regularization of estimated labeling
                          function accuracies toward their initial values
        :param balance_prior: strength of regularization of the estimated
                              class balance toward a uniform distribution
:param learn_class_balance: whether to estimate the distribution over
target classes (True) or assume to be
uniform (False)
"""
super().__init__(num_classes, num_lfs, init_acc, acc_prior)
self.class_balance = nn.Parameter(
torch.zeros([num_classes]), requires_grad=learn_class_balance)
self.balance_prior = balance_prior
def forward(self, votes):
"""Computes log likelihood of labeling function outputs for each
example in the batch.
For efficiency, this function prefers that votes is an instance of
        scipy.sparse.coo_matrix. You can avoid a conversion by passing votes
        in as an instance of this class.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: 1-d tensor of length m, where each element is the
                 log-likelihood of the corresponding row in votes
"""
class_ll = self._get_norm_class_balance()
conditional_ll = self._get_labeling_function_likelihoods(votes)
joint_ll = conditional_ll + class_ll
return torch.logsumexp(joint_ll, dim=1)
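    # A hedged sketch of a forward pass; the vote matrix is an assumption
    # (0 = abstain, 1..k = class votes):
    #   votes = sparse.coo_matrix(np.array([[1, 0, 2], [2, 2, 0]]))
    #   model = NaiveBayes(num_classes=2, num_lfs=3)
    #   ll = model(votes)  # log-likelihood tensor of shape (2,)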
def estimate_label_model(self, votes, config=None):
"""Estimates the parameters of the label model based on observed
labeling function outputs.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:param config: optional LearningConfig instance. If None, initialized
with default constructor
"""
if config is None:
config = LearningConfig()
# Initializes random seed
init_random(config.random_seed)
# Converts to CSR to standardize input
votes = sparse.csr_matrix(votes, dtype=np.int)
batches = self._create_minibatches(
votes, config.batch_size, shuffle_rows=True)
self._do_estimate_label_model(batches, config)
def get_label_distribution(self, votes):
"""Returns the posterior distribution over true labels given labeling
function outputs according to the model
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: m x k matrix, where each row is the posterior distribution over
the true class label for the corresponding example
"""
# Converts to CSR to standardize input
votes = sparse.csr_matrix(votes, dtype=np.int)
labels = np.ndarray((votes.shape[0], self.num_classes))
batches = self._create_minibatches(votes, 4096, shuffle_rows=False)
offset = 0
for votes, in batches:
class_balance = self._get_norm_class_balance()
lf_likelihood = self._get_labeling_function_likelihoods(votes)
jll = class_balance + lf_likelihood
for i in range(votes.shape[0]):
p = torch.exp(jll[i, :] - torch.max(jll[i, :]))
p = p / p.sum()
for j in range(self.num_classes):
labels[offset + i, j] = p[j]
offset += votes.shape[0]
return labels
def get_most_probable_labels(self, votes):
"""Returns the most probable true labels given observed function outputs.
:param votes: m x n matrix in {0, ..., k}, where m is the batch size,
n is the number of labeling functions and k is the number
of classes
:return: 1-d Numpy array of most probable labels
"""
return np.argmax(self.get_label_distribution(votes), axis=1) + 1
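    # A hedged end-to-end sketch; `votes` is an assumption:
    #   model = NaiveBayes(num_classes=2, num_lfs=3)
    #   model.estimate_label_model(votes)              # fit by SGD
    #   y_hat = model.get_most_probable_labels(votes)  # labels in {1, 2}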
def get_class_balance(self):
"""Returns the model's estimated class balance
:return: a NumPy array with one element in [0,1] for each target class,
representing the estimated prior probability that an example
has that label
"""
return np.exp(self._get_norm_class_balance().detach().numpy())
def _create_minibatches(self, votes, batch_size, shuffle_rows=False):
if shuffle_rows:
index = np.arange(np.shape(votes)[0])
np.random.shuffle(index)
votes = votes[index, :]
# Creates minibatches
batches = [(sparse.coo_matrix(
votes[i * batch_size: (i + 1) * batch_size, :],
copy=True),)
                   for i in range(int(np.ceil(votes.shape[0] / batch_size)))]
        return batches
"""
Convert label image to colors and other way around
SAMPLE run:
>> python run_image_convert_label_color.py \
-imgs "data_images/drosophila_ovary_slice/segm/*.png" \
-out data_images/drosophila_ovary_slice/segm_rgb \
-clrs data_images/drosophila_ovary_slice/segm_rgb/dict_label-color.json
Copyright (C) 2014-2016 <NAME> <<EMAIL>>
"""
import os
import sys
import glob
import json
import logging
import argparse
import multiprocessing as mproc
from functools import partial
import numpy as np
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utils.data_io as tl_data
import imsegm.utils.experiments as tl_expt
import imsegm.annotation as seg_annot
PATH_INPUT = os.path.join('data_images', 'drosophila_ovary_slice', 'segm', '*.png')
PATH_OUTPUT = os.path.join('data_images', 'drosophila_ovary_slice', 'segm_rgb')
NAME_JSON_DICT = 'dictionary_label-color.json'
NB_THREADS = max(1, int(mproc.cpu_count() * 0.9))
def parse_arg_params():
""" create simple arg parser with default values (input, results, dataset)
    :return dict: parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-imgs', '--path_images', type=str, required=True,
help='path to dir with images', default=PATH_INPUT)
parser.add_argument('-out', '--path_out', type=str, required=True,
help='path to output dir', default=PATH_OUTPUT)
parser.add_argument('-clrs', '--path_colors', type=str, required=False,
help='json with colour-label dict', default=None)
parser.add_argument('--nb_jobs', type=int, required=False,
help='number of jobs in parallel', default=NB_THREADS)
args = vars(parser.parse_args())
for n in ['path_images', 'path_out']:
p_dir = tl_data.update_path(os.path.dirname(args[n]))
assert os.path.isdir(p_dir), 'missing: %s' % args[n]
args[n] = os.path.join(p_dir, os.path.basename(args[n]))
if args['path_colors'] is not None:
args['path_colors'] = tl_data.update_path(args['path_colors'])
logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
return args
def load_dict_colours(path_json):
if path_json is not None and os.path.isfile(path_json):
with open(path_json, 'r') as fp:
dict_colors = json.load(fp)
else:
dict_colors = {}
# convert to correct type
dict_colors = {int(lb): tuple(dict_colors[lb]) for lb in dict_colors}
return dict_colors
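# A hedged example of the expected JSON content, mapping labels to RGB
# triples (the values are assumptions):
#   {"0": [0, 0, 0], "1": [255, 0, 0], "2": [0, 255, 0]}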
def convert_labels_2_colors(img, dict_colors, path_out):
    img_labels = np.unique(img)
"""Define the Component class."""
from collections import OrderedDict, Counter, defaultdict
from collections.abc import Iterable
from itertools import product
import numpy as np
from numpy import ndarray, isscalar, atleast_1d, atleast_2d, promote_types
from scipy.sparse import issparse
from openmdao.core.system import System, _supported_methods, _DEFAULT_COLORING_META, \
global_meta_names
from openmdao.core.constants import _UNDEFINED, INT_DTYPE
from openmdao.jacobians.dictionary_jacobian import DictionaryJacobian
from openmdao.vectors.vector import _full_slice
from openmdao.utils.array_utils import shape_to_len
from openmdao.utils.units import valid_units
from openmdao.utils.name_maps import rel_key2abs_key, abs_key2rel_key, rel_name2abs_name
from openmdao.utils.mpi import MPI
from openmdao.utils.general_utils import format_as_float_or_array, ensure_compatible, \
find_matches, simple_warning, make_set, _is_slicer_op
import openmdao.utils.coloring as coloring_mod
_forbidden_chars = ['.', '*', '?', '!', '[', ']']
_whitespace = set([' ', '\t', '\r', '\n'])
def _valid_var_name(name):
"""
Determine if the proposed name is a valid variable name.
Leading and trailing whitespace is illegal, and a specific list of characters
are illegal anywhere in the string.
Parameters
----------
name : str
Proposed name.
Returns
-------
bool
True if the proposed name is a valid variable name, else False.
"""
global _forbidden_chars, _whitespace
if not name:
return False
for char in _forbidden_chars:
if char in name:
return False
return name[0] not in _whitespace and name[-1] not in _whitespace
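# A hedged illustration of the rule (inputs are assumptions):
#   _valid_var_name('x_dot')  -> True
#   _valid_var_name('x[0]')   -> False  (brackets are forbidden)
#   _valid_var_name(' x')     -> False  (leading whitespace)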
class Component(System):
"""
Base Component class; not to be directly instantiated.
Attributes
----------
_approx_schemes : OrderedDict
A mapping of approximation types to the associated ApproximationScheme.
_var_rel2meta : dict
Dictionary mapping relative names to metadata.
This is only needed while adding inputs and outputs. During setup, these are used to
build the dictionaries of metadata.
_static_var_rel2meta : dict
Static version of above - stores data for variables added outside of setup.
_var_rel_names : {'input': [str, ...], 'output': [str, ...]}
List of relative names of owned variables existing on current proc.
This is only needed while adding inputs and outputs. During setup, these are used to
determine the list of absolute names.
_static_var_rel_names : dict
Static version of above - stores names of variables added outside of setup.
_declared_partials : dict
Cached storage of user-declared partials.
_declared_partial_checks : list
Cached storage of user-declared check partial options.
_no_check_partials : bool
If True, the check_partials function will ignore this component.
"""
def __init__(self, **kwargs):
"""
Initialize all attributes.
Parameters
----------
**kwargs : dict of keyword arguments
available here and in all descendants of this system.
"""
super().__init__(**kwargs)
self._var_rel_names = {'input': [], 'output': []}
self._var_rel2meta = {}
self._static_var_rel_names = {'input': [], 'output': []}
self._static_var_rel2meta = {}
self._declared_partials = defaultdict(dict)
self._declared_partial_checks = []
self._no_check_partials = False
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
super()._declare_options()
self.options.declare('distributed', types=bool, default=False,
desc='True if the component has variables that are distributed '
'across multiple processes.')
def setup(self):
"""
Declare inputs and outputs.
Available attributes:
name
pathname
comm
options
"""
pass
def _setup_procs(self, pathname, comm, mode, prob_meta):
"""
Execute first phase of the setup process.
Distribute processors, assign pathnames, and call setup on the component.
Parameters
----------
pathname : str
Global name of the system, including the path.
comm : MPI.Comm or <FakeComm>
MPI communicator object.
mode : str
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint). Default is 'rev'.
prob_meta : dict
Problem level metadata.
"""
super()._setup_procs(pathname, comm, mode, prob_meta)
orig_comm = comm
if self._num_par_fd > 1:
if comm.size > 1:
comm = self._setup_par_fd_procs(comm)
elif not MPI:
msg = ("%s: MPI is not active but num_par_fd = %d. No parallel finite difference "
"will be performed." % (self.msginfo, self._num_par_fd))
simple_warning(msg)
self.comm = comm
# Clear out old variable information so that we can call setup on the component.
self._var_rel_names = {'input': [], 'output': []}
self._var_rel2meta = {}
# reset shape if any dynamic shape parameters are set in case this is a resetup
# NOTE: this is necessary because we allow variables to be added in __init__.
for meta in self._static_var_rel2meta.values():
if 'shape_by_conn' in meta and (meta['shape_by_conn'] or
meta['copy_shape'] is not None):
meta['shape'] = None
if not np.isscalar(meta['value']):
if meta['value'].size > 0:
meta['value'] = meta['value'].flatten()[0]
else:
meta['value'] = 1.0
self._var_rel2meta.update(self._static_var_rel2meta)
for io in ['input', 'output']:
self._var_rel_names[io].extend(self._static_var_rel_names[io])
self.setup()
self._set_vector_class()
def _set_vector_class(self):
if self.options['distributed']:
dist_vec_class = self._problem_meta['distributed_vector_class']
if dist_vec_class is not None:
self._vector_class = dist_vec_class
else:
simple_warning("The 'distributed' option is set to True for Component %s, "
"but there is no distributed vector implementation (MPI/PETSc) "
"available. The default non-distributed vectors will be used."
% self.pathname)
self._vector_class = self._problem_meta['local_vector_class']
else:
self._vector_class = self._problem_meta['local_vector_class']
def _configure_check(self):
"""
Do any error checking on i/o configuration.
"""
# check here if declare_coloring was called during setup but declare_partials
# wasn't. If declare partials wasn't called, call it with of='*' and wrt='*' so we'll
# have something to color.
if self._coloring_info['coloring'] is not None:
for key, meta in self._declared_partials.items():
if 'method' in meta and meta['method'] is not None:
break
else:
method = self._coloring_info['method']
simple_warning("%s: declare_coloring or use_fixed_coloring was called but no approx"
" partials were declared. Declaring all partials as approximated "
"using default metadata and method='%s'." % (self.msginfo, method))
self.declare_partials('*', '*', method=method)
super()._configure_check()
def _setup_var_data(self):
"""
Compute the list of abs var names, abs/prom name maps, and metadata dictionaries.
"""
global global_meta_names
super()._setup_var_data()
allprocs_prom2abs_list = self._var_allprocs_prom2abs_list
abs2prom = self._var_allprocs_abs2prom = self._var_abs2prom
# Compute the prefix for turning rel/prom names into abs names
prefix = self.pathname + '.' if self.pathname else ''
iproc = self.comm.rank
for io in ['input', 'output']:
abs2meta = self._var_abs2meta[io]
allprocs_abs2meta = self._var_allprocs_abs2meta[io]
is_input = io == 'input'
for i, prom_name in enumerate(self._var_rel_names[io]):
abs_name = prefix + prom_name
abs2meta[abs_name] = metadata = self._var_rel2meta[prom_name]
# Compute allprocs_prom2abs_list, abs2prom
allprocs_prom2abs_list[io][prom_name] = [abs_name]
abs2prom[io][abs_name] = prom_name
allprocs_abs2meta[abs_name] = {
meta_name: metadata[meta_name]
for meta_name in global_meta_names[io]
}
if is_input and 'src_indices' in metadata:
allprocs_abs2meta[abs_name]['has_src_indices'] = \
metadata['src_indices'] is not None
# ensure that if src_indices is a slice we reset it to that instead of
# the converted array value (in case this is a re-setup), so that we can
# re-convert using potentially different sizing information.
if metadata['src_slice'] is not None:
metadata['src_indices'] = metadata['src_slice']
for prom_name, val in self._var_discrete[io].items():
abs_name = prefix + prom_name
# Compute allprocs_prom2abs_list, abs2prom
allprocs_prom2abs_list[io][prom_name] = [abs_name]
abs2prom[io][abs_name] = prom_name
# Compute allprocs_discrete (metadata for discrete vars)
self._var_allprocs_discrete[io][abs_name] = v = val.copy()
del v['value']
if self._var_discrete['input'] or self._var_discrete['output']:
self._discrete_inputs = _DictValues(self._var_discrete['input'])
self._discrete_outputs = _DictValues(self._var_discrete['output'])
else:
self._discrete_inputs = self._discrete_outputs = ()
def _setup_var_sizes(self):
"""
Compute the arrays of variable sizes for all variables/procs on this system.
"""
iproc = self.comm.rank
for io in ('input', 'output'):
sizes = self._var_sizes['nonlinear'][io] = np.zeros((self.comm.size,
len(self._var_rel_names[io])),
dtype=INT_DTYPE)
for i, (name, metadata) in enumerate(self._var_allprocs_abs2meta[io].items()):
sizes[iproc, i] = metadata['size']
if self.comm.size > 1:
my_sizes = sizes[iproc, :].copy()
self.comm.Allgather(my_sizes, sizes)
# all names are relevant for the 'nonlinear' and 'linear' vectors. We
# can then use them to compute the size arrays of for all other vectors
# based on the nonlinear size array.
nl_allprocs_relnames = self._var_allprocs_relevant_names['nonlinear']
nl_relnames = self._var_relevant_names['nonlinear']
for io in ('input', 'output'):
nl_allprocs_relnames[io] = list(self._var_allprocs_abs2meta[io])
nl_relnames[io] = list(self._var_abs2meta[io])
self._setup_var_index_maps('nonlinear')
self._owned_sizes = self._var_sizes['nonlinear']['output']
if self._use_derivatives:
sizes = self._var_sizes
nl_sizes = sizes['nonlinear']
nl_abs2idx = self._var_allprocs_abs2idx['nonlinear']
sizes['linear'] = nl_sizes
self._var_allprocs_relevant_names['linear'] = nl_allprocs_relnames
self._var_relevant_names['linear'] = nl_relnames
self._var_allprocs_abs2idx['linear'] = nl_abs2idx
# Initialize size arrays for other linear vecs besides 'linear'
# (which is the same as 'nonlinear')
for vec_name in self._lin_rel_vec_name_list[1:]:
# at component level, _var_allprocs_* is the same as var_* since all vars exist in
# all procs for a given component, so we don't have to mess with figuring out what
# vars are local.
relnames = self._var_allprocs_relevant_names[vec_name]
sizes[vec_name] = {}
for io in ('input', 'output'):
sizes[vec_name][io] = sz = np.zeros((self.comm.size, len(relnames[io])),
INT_DTYPE)
# Variables for this vec_name are a subset of those for nonlinear, so just
# take columns of the nonlinear sizes array
for idx, abs_name in enumerate(relnames[io]):
sz[:, idx] = nl_sizes[io][:, nl_abs2idx[abs_name]]
self._setup_var_index_maps(vec_name)
def _setup_partials(self):
"""
Process all partials and approximations that the user declared.
"""
self._subjacs_info = {}
self._jacobian = DictionaryJacobian(system=self)
self.setup_partials() # hook for component writers to specify sparsity patterns
# check to make sure that if num_par_fd > 1 that this system is actually doing FD.
# Unfortunately we have to do this check after system setup has been called because that's
# when declare_partials generally happens, so we raise an exception here instead of just
# resetting the value of num_par_fd (because the comm has already been split and possibly
# used by the system setup).
orig_comm = self._full_comm if self._full_comm is not None else self.comm
if self._num_par_fd > 1 and orig_comm.size > 1 and not (self._owns_approx_jac or
self._approx_schemes):
raise RuntimeError("%s: num_par_fd is > 1 but no FD is active." % self.msginfo)
for key, dct in self._declared_partials.items():
of, wrt = key
self._declare_partials(of, wrt, dct)
def setup_partials(self):
"""
Declare partials.
This is meant to be overridden by component classes. All partials should be
declared here since this is called after all size/shape information is known for
all variables.
"""
pass
def _update_wrt_matches(self, info):
"""
Determine the list of wrt variables that match the wildcard(s) given in declare_coloring.
Parameters
----------
info : dict
Coloring metadata dict.
"""
ofs, allwrt = self._get_partials_varlists()
wrt_patterns = info['wrt_patterns']
matches_prom = set()
for w in wrt_patterns:
matches_prom.update(find_matches(w, allwrt))
# error if nothing matched
if not matches_prom:
raise ValueError("{}: Invalid 'wrt' variable(s) specified for colored approx partial "
"options: {}.".format(self.msginfo, wrt_patterns))
info['wrt_matches_prom'] = matches_prom
info['wrt_matches'] = [rel_name2abs_name(self, n) for n in matches_prom]
def _update_subjac_sparsity(self, sparsity):
"""
Update subjac sparsity info based on the given coloring.
The sparsity of the partial derivatives in this component will be used when computing
the sparsity of the total jacobian for the entire model. Without this, all of this
component's partials would be treated as dense, resulting in an overly conservative
coloring of the total jacobian.
Parameters
----------
sparsity : dict
A nested dict of the form dct[of][wrt] = (rows, cols, shape)
"""
# sparsity uses relative names, so we need to convert to absolute
pathname = self.pathname
for of, sub in sparsity.items():
of_abs = '.'.join((pathname, of)) if pathname else of
for wrt, tup in sub.items():
wrt_abs = '.'.join((pathname, wrt)) if pathname else wrt
abs_key = (of_abs, wrt_abs)
if abs_key in self._subjacs_info:
# add sparsity info to existing partial info
self._subjacs_info[abs_key]['sparsity'] = tup
def add_input(self, name, val=1.0, shape=None, src_indices=None, flat_src_indices=None,
units=None, desc='', tags=None, shape_by_conn=False, copy_shape=None):
"""
Add an input variable to the component.
Parameters
----------
name : str
name of the variable in this component's namespace.
val : float or list or tuple or ndarray or Iterable
The initial value of the variable being added in user-defined units.
Default is 1.0.
shape : int or tuple or list or None
Shape of this variable, only required if src_indices not provided and
val is not an array. Default is None.
src_indices : int or list of ints or tuple of ints or int ndarray or Iterable or None
The global indices of the source variable to transfer data from.
A value of None implies this input depends on all entries of source.
Default is None. The shapes of the target and src_indices must match,
and form of the entries within is determined by the value of 'flat_src_indices'.
flat_src_indices : bool
If True, each entry of src_indices is assumed to be an index into the
flattened source. Otherwise each entry must be a tuple or list of size equal
to the number of dimensions of the source.
units : str or None
Units in which this input variable will be provided to the component
during execution. Default is None, which means it is unitless.
desc : str
description of the variable
tags : str or list of strs
User defined tags that can be used to filter what gets listed when calling
list_inputs and list_outputs.
shape_by_conn : bool
If True, shape this input to match its connected output.
copy_shape : str or None
If a str, that str is the name of a variable. Shape this input to match that of
the named variable.
Returns
-------
dict
metadata for added variable
"""
# First, type check all arguments
if not isinstance(name, str):
raise TypeError('%s: The name argument should be a string.' % self.msginfo)
if not _valid_var_name(name):
raise NameError("%s: '%s' is not a valid input name." % (self.msginfo, name))
if not isscalar(val) and not isinstance(val, (list, tuple, ndarray, Iterable)):
raise TypeError('%s: The val argument should be a float, list, tuple, ndarray or '
'Iterable' % self.msginfo)
if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
raise TypeError("%s: The shape argument should be an int, tuple, or list but "
"a '%s' was given" % (self.msginfo, type(shape)))
if src_indices is not None and not isinstance(src_indices, (int, list, tuple,
ndarray, Iterable)):
raise TypeError('%s: The src_indices argument should be an int, list, '
'tuple, ndarray or Iterable' % self.msginfo)
if units is not None:
if not isinstance(units, str):
raise TypeError('%s: The units argument should be a str or None.' % self.msginfo)
if not valid_units(units):
raise ValueError("%s: The units '%s' are invalid." % (self.msginfo, units))
if tags is not None and not isinstance(tags, (str, list)):
raise TypeError('%s: The tags argument should be a str or list' % self.msginfo)
if (shape_by_conn or copy_shape):
if shape is not None or not isscalar(val):
raise ValueError("%s: If shape is to be set dynamically using 'shape_by_conn' or "
"'copy_shape', 'shape' and 'val' should be a scalar, "
"but shape of '%s' and val of '%s' was given for variable '%s'."
% (self.msginfo, shape, val, name))
if src_indices is not None:
raise ValueError("%s: Setting of 'src_indices' along with 'shape_by_conn' or "
"'copy_shape' for variable '%s' is currently unsupported." %
(self.msginfo, name))
src_slice = None
if not (shape_by_conn or copy_shape):
if src_indices is not None:
if _is_slicer_op(src_indices):
src_slice = src_indices
if flat_src_indices is not None:
simple_warning(f"{self.msginfo}: Input '{name}' was added with slice "
"src_indices, so flat_src_indices is ignored.")
flat_src_indices = True
else:
src_indices = np.asarray(src_indices, dtype=INT_DTYPE)
# value, shape: based on args, making sure they are compatible
val, shape, src_indices = ensure_compatible(name, val, shape, src_indices)
metadata = {
'value': val,
'shape': shape,
'size': shape_to_len(shape),
'src_indices': src_indices, # these will ultimately be converted to a flat index array
'flat_src_indices': flat_src_indices,
'src_slice': src_slice, # store slice def here, if any. This is never overwritten
'units': units,
'desc': desc,
'distributed': self.options['distributed'],
'tags': make_set(tags),
'shape_by_conn': shape_by_conn,
'copy_shape': copy_shape,
}
if self._static_mode:
var_rel2meta = self._static_var_rel2meta
var_rel_names = self._static_var_rel_names
else:
var_rel2meta = self._var_rel2meta
var_rel_names = self._var_rel_names
# Disallow dupes
if name in var_rel2meta:
raise ValueError("{}: Variable name '{}' already exists.".format(self.msginfo, name))
var_rel2meta[name] = metadata
var_rel_names['input'].append(name)
self._var_added(name)
return metadata
def add_discrete_input(self, name, val, desc='', tags=None):
"""
Add a discrete input variable to the component.
Parameters
----------
name : str
name of the variable in this component's namespace.
val : a picklable object
The initial value of the variable being added.
desc : str
description of the variable
tags : str or list of strs
User defined tags that can be used to filter what gets listed when calling
list_inputs and list_outputs.
Returns
-------
dict
metadata for added variable
"""
# First, type check all arguments
if not isinstance(name, str):
raise TypeError('%s: The name argument should be a string.' % self.msginfo)
if not _valid_var_name(name):
raise NameError("%s: '%s' is not a valid input name." % (self.msginfo, name))
if tags is not None and not isinstance(tags, (str, list)):
raise TypeError('%s: The tags argument should be a str or list' % self.msginfo)
metadata = {
'value': val,
'type': type(val),
'desc': desc,
'tags': make_set(tags),
}
if metadata['type'] == np.ndarray:
metadata.update({'shape': val.shape})
if self._static_mode:
var_rel2meta = self._static_var_rel2meta
else:
var_rel2meta = self._var_rel2meta
# Disallow dupes
if name in var_rel2meta:
raise ValueError("{}: Variable name '{}' already exists.".format(self.msginfo, name))
var_rel2meta[name] = self._var_discrete['input'][name] = metadata
self._var_added(name)
return metadata
def add_output(self, name, val=1.0, shape=None, units=None, res_units=None, desc='',
lower=None, upper=None, ref=1.0, ref0=0.0, res_ref=1.0, tags=None,
shape_by_conn=False, copy_shape=None):
"""
Add an output variable to the component.
Parameters
----------
name : str
name of the variable in this component's namespace.
val : float or list or tuple or ndarray
The initial value of the variable being added in user-defined units. Default is 1.0.
shape : int or tuple or list or None
Shape of this variable, only required if val is not an array.
Default is None.
units : str or None
Units in which the output variables will be provided to the component during execution.
Default is None, which means it has no units.
res_units : str or None
Units in which the residuals of this output will be given to the user when requested.
Default is None, which means it has no units.
desc : str
description of the variable.
lower : float or list or tuple or ndarray or Iterable or None
lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
consistent with the shape arg (if given), or (3) an array_like matching the shape of
val, if val is array_like. A value of None means this output has no lower bound.
Default is None.
upper : float or list or tuple or ndarray or Iterable or None
upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
consistent with the shape arg (if given), or (3) an array_like matching the shape of
val, if val is array_like. A value of None means this output has no upper bound.
Default is None.
ref : float or ndarray
Scaling parameter. The value in the user-defined units of this output variable when
the scaled value is 1. Default is 1.
ref0 : float or ndarray
Scaling parameter. The value in the user-defined units of this output variable when
the scaled value is 0. Default is 0.
res_ref : float or ndarray
Scaling parameter. The value in the user-defined res_units of this output's residual
when the scaled value is 1. Default is 1.
tags : str or list of strs or set of strs
User defined tags that can be used to filter what gets listed when calling
list_inputs and list_outputs.
shape_by_conn : bool
If True, shape this output to match its connected input(s).
copy_shape : str or None
If a str, that str is the name of a variable. Shape this output to match that of
the named variable.
Returns
-------
dict
metadata for added variable
"""
# First, type check all arguments
if (shape_by_conn or copy_shape) and (shape is not None or not isscalar(val)):
raise ValueError("%s: If shape is to be set dynamically using 'shape_by_conn' or "
"'copy_shape', 'shape' and 'val' should be scalar, "
"but shape of '%s' and val of '%s' was given for variable '%s'."
% (self.msginfo, shape, val, name))
if not isinstance(name, str):
raise TypeError('%s: The name argument should be a string.' % self.msginfo)
if not _valid_var_name(name):
raise NameError("%s: '%s' is not a valid output name." % (self.msginfo, name))
if not (copy_shape or shape_by_conn):
if not isscalar(val) and not isinstance(val, (list, tuple, ndarray, Iterable)):
msg = '%s: The val argument should be a float, list, tuple, ndarray or Iterable'
raise TypeError(msg % self.msginfo)
if not isscalar(ref) and not isinstance(ref, (list, tuple, ndarray, Iterable)):
msg = '%s: The ref argument should be a float, list, tuple, ndarray or Iterable'
raise TypeError(msg % self.msginfo)
if not isscalar(ref0) and not isinstance(ref0, (list, tuple, ndarray, Iterable)):
msg = '%s: The ref0 argument should be a float, list, tuple, ndarray or Iterable'
raise TypeError(msg % self.msginfo)
if not isscalar(res_ref) and not isinstance(res_ref, (list, tuple, ndarray, Iterable)):
msg = '%s: The res_ref argument should be a float, list, tuple, ndarray or Iterable'
raise TypeError(msg % self.msginfo)
if shape is not None and not isinstance(shape, (int, tuple, list, np.integer)):
raise TypeError("%s: The shape argument should be an int, tuple, or list but "
"a '%s' was given" % (self.msginfo, type(shape)))
if res_units is not None and not isinstance(res_units, str):
raise TypeError('%s: The res_units argument should be a str or None' % self.msginfo)
if units is not None:
if not isinstance(units, str):
raise TypeError('%s: The units argument should be a str or None' % self.msginfo)
if not valid_units(units):
raise ValueError("%s: The units '%s' are invalid" % (self.msginfo, units))
if tags is not None and not isinstance(tags, (str, set, list)):
raise TypeError('%s: The tags argument should be a str, set, or list' % self.msginfo)
if not (copy_shape or shape_by_conn):
# value, shape: based on args, making sure they are compatible
val, shape, _ = ensure_compatible(name, val, shape)
if lower is not None:
lower = ensure_compatible(name, lower, shape)[0]
self._has_bounds = True
if upper is not None:
upper = ensure_compatible(name, upper, shape)[0]
self._has_bounds = True
# All refs: check the shape if necessary
for item, item_name in zip([ref, ref0, res_ref], ['ref', 'ref0', 'res_ref']):
if not isscalar(item):
it = atleast_1d(item)
if it.shape != shape:
raise ValueError("{}: When adding output '{}', expected shape {} but got "
"shape {} for argument '{}'.".format(self.msginfo, name,
shape, it.shape,
item_name))
if isscalar(ref):
self._has_output_scaling |= ref != 1.0
else:
self._has_output_scaling |= np.any(ref != 1.0)
if isscalar(ref0):
self._has_output_scaling |= ref0 != 0.0
else:
self._has_output_scaling |= np.any(ref0 != 0.0)
if isscalar(res_ref):
self._has_resid_scaling |= res_ref != 1.0
else:
self._has_resid_scaling |= np.any(res_ref != 1.0)
metadata = {
'value': val,
'shape': shape,
'size': shape_to_len(shape),
'units': units,
'res_units': res_units,
'desc': desc,
'distributed': self.options['distributed'],
'tags': make_set(tags),
'ref': format_as_float_or_array('ref', ref, flatten=True),
'ref0': format_as_float_or_array('ref0', ref0, flatten=True),
'res_ref': format_as_float_or_array('res_ref', res_ref, flatten=True),
'lower': lower,
'upper': upper,
'shape_by_conn': shape_by_conn,
'copy_shape': copy_shape
}
# We may not know the pathname yet, so we have to use name for now, instead of abs_name.
if self._static_mode:
var_rel2meta = self._static_var_rel2meta
var_rel_names = self._static_var_rel_names
else:
var_rel2meta = self._var_rel2meta
var_rel_names = self._var_rel_names
# Disallow dupes
if name in var_rel2meta:
raise ValueError("{}: Variable name '{}' already exists.".format(self.msginfo, name))
var_rel2meta[name] = metadata
var_rel_names['output'].append(name)
self._var_added(name)
return metadata
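# Illustrative usage (a sketch with hypothetical names, not part of this
# class):
#
#     def setup(self):
#         self.add_input('mass', val=10.0, units='kg', desc='payload mass')
#         self.add_output('force', val=np.zeros(3), units='N', lower=0.0,
#                         ref=1.0e3)
#
# Here ref=1.0e3 means a scaled output value of 1 corresponds to 1000 N in
# user-defined units.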
def add_discrete_output(self, name, val, desc='', tags=None):
"""
Add a discrete output variable to the component.
Parameters
----------
name : str
name of the variable in this component's namespace.
val : a picklable object
The initial value of the variable being added.
desc : str
description of the variable.
tags : str or list of strs or set of strs
User defined tags that can be used to filter what gets listed when calling
list_inputs and list_outputs.
Returns
-------
dict
metadata for added variable
"""
if not isinstance(name, str):
raise TypeError('%s: The name argument should be a string.' % self.msginfo)
if not _valid_var_name(name):
raise NameError("%s: '%s' is not a valid output name." % (self.msginfo, name))
if tags is not None and not isinstance(tags, (str, set, list)):
raise TypeError('%s: The tags argument should be a str, set, or list' % self.msginfo)
metadata = {
'value': val,
'type': type(val),
'desc': desc,
'tags': make_set(tags)
}
if metadata['type'] == np.ndarray:
metadata.update({'shape': val.shape})
if self._static_mode:
var_rel2meta = self._static_var_rel2meta
else:
var_rel2meta = self._var_rel2meta
# Disallow dupes
if name in var_rel2meta:
raise ValueError("{}: Variable name '{}' already exists.".format(self.msginfo, name))
var_rel2meta[name] = self._var_discrete['output'][name] = metadata
self._var_added(name)
return metadata
def _var_added(self, name):
"""
Notify config that a variable has been added to this Component.
Parameters
----------
name : str
Name of the added variable.
"""
if self._problem_meta is not None and self._problem_meta['config_info'] is not None:
self._problem_meta['config_info']._var_added(self.pathname, name)
def _update_dist_src_indices(self, abs_in2out, all_abs2meta, all_abs2idx, all_sizes):
"""
Set default src_indices on distributed components for any inputs where they aren't set.
Parameters
----------
abs_in2out : dict
Mapping of connected inputs to their source. Names are absolute.
all_abs2meta : dict
Mapping of absolute names to metadata for all variables in the model.
all_abs2idx : dict
Dictionary mapping an absolute name to its allprocs variable index.
all_sizes : dict
Mapping of vec_names and types to sizes of each variable in all procs.
Returns
-------
set
Names of inputs where src_indices were added.
"""
if not self.options['distributed'] or self.comm.size == 1:
return set()
iproc = self.comm.rank
abs2meta_in = self._var_abs2meta['input']
all_abs2meta_in = all_abs2meta['input']
all_abs2meta_out = all_abs2meta['output']
sizes_in = self._var_sizes['nonlinear']['input']
sizes_out = all_sizes['nonlinear']['output']
added_src_inds = set()
for i, iname in enumerate(self._var_allprocs_abs2meta['input']):
if iname in abs2meta_in and abs2meta_in[iname]['src_indices'] is None:
src = abs_in2out[iname]
out_i = all_abs2idx[src]
nzs = np.nonzero(sizes_out[:, out_i])[0]
if (all_abs2meta_out[src]['global_size'] ==
all_abs2meta_in[iname]['global_size'] or nzs.size == self.comm.size):
# This offset assumes a 'full' distributed output
offset = np.sum(sizes_in[:iproc, i])
end = offset + sizes_in[iproc, i]
else: # distributed output (may have some zero size entries)
if nzs.size == 1:
offset = 0
end = sizes_out[nzs[0], out_i]
else:
# total sizes differ and output is distributed, so can't determine mapping
raise RuntimeError(f"{self.msginfo}: Can't determine src_indices "
f"automatically for input '{iname}'. They must be "
"supplied manually.")
simple_warning(f"{self.msginfo}: Component is distributed but input '{iname}' was "
"added without src_indices. Setting src_indices to "
f"range({offset}, {end}).")
abs2meta_in[iname]['src_indices'] = np.arange(offset, end, dtype=INT_DTYPE)
all_abs2meta_in[iname]['has_src_indices'] = True
added_src_inds.add(iname)
return added_src_inds
def _approx_partials(self, of, wrt, method='fd', **kwargs):
"""
Inform the framework that the specified derivatives are to be approximated.
Parameters
----------
of : str or list of str
The name of the residual(s) that derivatives are being computed for.
May also contain a glob pattern.
wrt : str or list of str
The name of the variables that derivatives are taken with respect to.
This can contain the name of any input or output variable.
May also contain a glob pattern.
method : str
The type of approximation that should be used. Valid options include:
- 'fd': Finite Difference
**kwargs : dict
Keyword arguments for controlling the behavior of the approximation.
"""
pattern_matches = self._find_partial_matches(of, wrt)
self._has_approx = True
for of_bundle, wrt_bundle in product(*pattern_matches):
of_pattern, of_matches = of_bundle
wrt_pattern, wrt_matches = wrt_bundle
if not of_matches:
raise ValueError('{}: No matches were found for of="{}"'.format(self.msginfo,
of_pattern))
if not wrt_matches:
raise ValueError('{}: No matches were found for wrt="{}"'.format(self.msginfo,
wrt_pattern))
info = self._subjacs_info
for rel_key in product(of_matches, wrt_matches):
abs_key = rel_key2abs_key(self, rel_key)
meta = info[abs_key]
meta['method'] = method
meta.update(kwargs)
info[abs_key] = meta
def declare_partials(self, of, wrt, dependent=True, rows=None, cols=None, val=None,
method='exact', step=None, form=None, step_calc=None):
"""
Declare information about this component's subjacobians.
Parameters
----------
of : str or list of str
The name of the residual(s) that derivatives are being computed for.
May also contain a glob pattern.
wrt : str or list of str
The name of the variables that derivatives are taken with respect to.
This can contain the name of any input or output variable.
May also contain a glob pattern.
dependent : bool(True)
If False, specifies no dependence between the output(s) and the
input(s). This is only necessary in the case of a sparse global
jacobian, because if 'dependent=False' is not specified and
declare_partials is not called for a given pair, then a dense
matrix of zeros will be allocated in the sparse global jacobian
for that pair. In the case of a dense global jacobian it doesn't
matter because the space for a dense subjac will always be
allocated for every pair.
rows : ndarray of int or None
Row indices for each nonzero entry. For sparse subjacobians only.
cols : ndarray of int or None
Column indices for each nonzero entry. For sparse subjacobians only.
val : float or ndarray of float or scipy.sparse
Value of subjacobian. If rows and cols are not None, this will
contain the values found at each (row, col) location in the subjac.
method : str
The type of approximation that should be used. Valid options include:
'fd': Finite Difference, 'cs': Complex Step, 'exact': use the component
defined analytic derivatives. Default is 'exact'.
step : float
Step size for approximation. Defaults to None, in which case the approximation
method provides its default value.
form : string
Form for finite difference, can be 'forward', 'backward', or 'central'. Defaults
to None, in which case the approximation method provides its default value.
step_calc : string
Step type for finite difference, can be 'abs' for absolute, or 'rel' for
relative. Defaults to None, in which case the approximation method provides
its default value.
Returns
-------
dict
Metadata dict for the specified partial(s).
"""
try:
method_func = _supported_methods[method]
except KeyError:
msg = '{}: d({})/d({}): method "{}" is not supported, method must be one of {}'
raise ValueError(msg.format(self.msginfo, of, wrt, method, sorted(_supported_methods)))
if isinstance(of, list):
of = tuple(of)
if isinstance(wrt, list):
wrt = tuple(wrt)
meta = self._declared_partials[of, wrt]
meta['dependent'] = dependent
# If only one of rows/cols is specified
if (rows is None) ^ (cols is None):
raise ValueError('{}: d({})/d({}): If one of rows/cols is specified, then '
'both must be specified.'.format(self.msginfo, of, wrt))
if dependent:
meta['value'] = val
if rows is not None:
meta['rows'] = rows
meta['cols'] = cols
# First, check the length of rows and cols to catch this easy mistake and give a
# clear message.
if len(cols) != len(rows):
raise RuntimeError("{}: d({})/d({}): declare_partials has been called "
"with rows and cols, which should be arrays of equal length,"
" but rows is length {} while cols is length "
"{}.".format(self.msginfo, of, wrt, len(rows), len(cols)))
# Check for repeated rows/cols indices.
idxset = set(zip(rows, cols))
if len(rows) - len(idxset) > 0:
dups = [n for n, val in Counter(zip(rows, cols)).items() if val > 1]
raise RuntimeError("{}: d({})/d({}): declare_partials has been called "
"with rows and cols that specify the following duplicate "
"subjacobian entries: {}.".format(self.msginfo, of, wrt,
sorted(dups)))
if method_func is not None:
# we're doing approximations
self._has_approx = True
meta['method'] = method
self._get_approx_scheme(method)
default_opts = method_func.DEFAULT_OPTIONS
# If rows/cols is specified
if rows is not None or cols is not None:
raise ValueError("{}: d({})/d({}): Sparse FD specification not supported "
"yet.".format(self.msginfo, of, wrt))
else:
default_opts = ()
if step:
if 'step' in default_opts:
meta['step'] = step
else:
raise RuntimeError("{}: d({})/d({}): 'step' is not a valid option for "
"'{}'".format(self.msginfo, of, wrt, method))
if form:
if 'form' in default_opts:
meta['form'] = form
else:
raise RuntimeError("{}: d({})/d({}): 'form' is not a valid option for "
"'{}'".format(self.msginfo, of, wrt, method))
if step_calc:
if 'step_calc' in default_opts:
meta['step_calc'] = step_calc
else:
raise RuntimeError("{}: d({})/d({}): 'step_calc' is not a valid option "
"for '{}'".format(self.msginfo, of, wrt, method))
return meta
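# Illustrative calls (a sketch with hypothetical 'y' and 'x', not part of
# this class):
#
#     # dense analytic subjacobian
#     self.declare_partials('y', 'x')
#     # sparse subjacobian given as coordinate (rows, cols) lists
#     self.declare_partials('y', 'x', rows=[0, 1], cols=[0, 1], val=1.0)
#     # approximate the subjacobian with central finite differences
#     self.declare_partials('y', 'x', method='fd', form='central', step=1e-6)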
def declare_coloring(self,
wrt=_DEFAULT_COLORING_META['wrt_patterns'],
method=_DEFAULT_COLORING_META['method'],
form=None,
step=None,
per_instance=_DEFAULT_COLORING_META['per_instance'],
num_full_jacs=_DEFAULT_COLORING_META['num_full_jacs'],
tol=_DEFAULT_COLORING_META['tol'],
orders=_DEFAULT_COLORING_META['orders'],
perturb_size=_DEFAULT_COLORING_META['perturb_size'],
min_improve_pct=_DEFAULT_COLORING_META['min_improve_pct'],
show_summary=_DEFAULT_COLORING_META['show_summary'],
show_sparsity=_DEFAULT_COLORING_META['show_sparsity']):
"""
Set options for deriv coloring of a set of wrt vars matching the given pattern(s).
Parameters
----------
wrt : str or list of str
The name or names of the variables that derivatives are taken with respect to.
This can contain input names, output names, or glob patterns.
method : str
Method used to compute derivative: "fd" for finite difference, "cs" for complex step.
form : str
Finite difference form, can be "forward", "central", or "backward". Leave
undeclared to keep unchanged from previous or default value.
step : float
Step size for finite difference. Leave undeclared to keep unchanged from previous
or default value.
per_instance : bool
If True, a separate coloring will be generated for each instance of a given class.
Otherwise, only one coloring for a given class will be generated and all instances
of that class will use it.
num_full_jacs : int
Number of times to repeat partial jacobian computation when computing sparsity.
tol : float
Tolerance used to determine if an array entry is nonzero during sparsity determination.
orders : int
Number of orders above and below the tolerance to check during the tolerance sweep.
perturb_size : float
Size of input/output perturbation during generation of sparsity.
min_improve_pct : float
If coloring does not improve (decrease) the number of solves more than the given
percentage, coloring will not be used.
show_summary : bool
If True, display summary information after generating coloring.
show_sparsity : bool
If True, display sparsity with coloring info after generating coloring.
"""
super().declare_coloring(wrt, method, form, step, per_instance,
num_full_jacs,
tol, orders, perturb_size, min_improve_pct,
show_summary, show_sparsity)
# create approx partials for all matches
meta = self.declare_partials('*', wrt, method=method, step=step, form=form)
meta['coloring'] = True
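# Illustrative call (a sketch, not part of this class): color all partials
# with respect to inputs matching 'x*' using complex step.
#
#     self.declare_coloring(wrt='x*', method='cs', num_full_jacs=2,
#                           show_summary=True)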
def set_check_partial_options(self, wrt, method='fd', form=None, step=None, step_calc=None,
directional=False):
"""
Set options that will be used for checking partial derivatives.
Parameters
----------
wrt : str or list of str
The name or names of the variables that derivatives are taken with respect to.
This can contain the name of any input or output variable.
May also contain a glob pattern.
method : str
Method for check: "fd" for finite difference, "cs" for complex step.
form : str
Finite difference form for check, can be "forward", "central", or "backward". Leave
undeclared to keep unchanged from previous or default value.
step : float
Step size for finite difference check. Leave undeclared to keep unchanged from previous
or default value.
step_calc : str
Type of step calculation for check, can be "abs" for absolute (default) or "rel" for
relative. Leave undeclared to keep unchanged from previous or default value.
directional : bool
Set to True to perform a single directional derivative for each vector variable in the
pattern named in wrt.
"""
supported_methods = ('fd', 'cs')
if method not in supported_methods:
msg = "{}: Method '{}' is not supported, method must be one of {}"
raise ValueError(msg.format(self.msginfo, method, supported_methods))
if step and not isinstance(step, (int, float)):
msg = "{}: The value of 'step' must be numeric, but '{}' was specified."
raise ValueError(msg.format(self.msginfo, step))
supported_step_calc = ('abs', 'rel')
if step_calc and step_calc not in supported_step_calc:
msg = "{}: The value of 'step_calc' must be one of {}, but '{}' was specified."
raise ValueError(msg.format(self.msginfo, supported_step_calc, step_calc))
if not isinstance(wrt, (str, list, tuple)):
msg = "{}: The value of 'wrt' must be a string or list of strings, but a type " \
"of '{}' was provided."
raise ValueError(msg.format(self.msginfo, type(wrt).__name__))
if not isinstance(directional, bool):
msg = "{}: The value of 'directional' must be True or False, but a type " \
"of '{}' was provided."
raise ValueError(msg.format(self.msginfo, type(directional).__name__))
wrt_list = [wrt] if isinstance(wrt, str) else wrt
self._declared_partial_checks.append((wrt_list, method, form, step, step_calc,
directional))
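# Illustrative call (a sketch, not part of this class): check all partials
# with central finite differences and a relative step.
#
#     self.set_check_partial_options(wrt='*', method='fd', form='central',
#                                    step=1e-6, step_calc='rel')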
def _get_check_partial_options(self, include_wrt_outputs=True):
"""
Return dictionary of partial options with pattern matches processed.
This is called by check_partials.
Parameters
----------
include_wrt_outputs : bool
If True, include outputs in the wrt list.
Returns
-------
dict(wrt : (options))
Dictionary keyed by name with tuples of options (method, form, step, step_calc)
"""
opts = {}
of, wrt = self._get_potential_partials_lists(include_wrt_outputs=include_wrt_outputs)
invalid_wrt = []
matrix_free = self.matrix_free
if matrix_free:
n_directional = 0
for wrt_list, method, form, step, step_calc, directional in self._declared_partial_checks:
for pattern in wrt_list:
matches = find_matches(pattern, wrt)
# if a non-wildcard var name was specified and not found, save for later Exception
if len(matches) == 0 and _valid_var_name(pattern):
invalid_wrt.append(pattern)
for match in matches:
if match in opts:
opt = opts[match]
# New assignments take precedence
keynames = ['method', 'form', 'step', 'step_calc', 'directional']
for name, value in zip(keynames,
[method, form, step, step_calc, directional]):
if value is not None:
opt[name] = value
else:
opts[match] = {'method': method,
'form': form,
'step': step,
'step_calc': step_calc,
'directional': directional}
if matrix_free and directional:
n_directional += 1
if invalid_wrt:
msg = "{}: Invalid 'wrt' variables specified for check_partial options: {}."
raise ValueError(msg.format(self.msginfo, invalid_wrt))
if matrix_free:
if n_directional > 0 and n_directional < len(wrt):
msg = "{}: For matrix free components, directional should be set to True for " + \
"all inputs."
raise ValueError(msg.format(self.msginfo))
return opts
def _declare_partials(self, of, wrt, dct, quick_declare=False):
"""
Store subjacobian metadata for later use.
Parameters
----------
of : tuple of str
The names of the residuals that derivatives are being computed for.
May also contain glob patterns.
wrt : tuple of str
The names of the variables that derivatives are taken with respect to.
This can contain the name of any input or output variable.
May also contain glob patterns.
dct : dict
Metadata dict specifying shape, and/or approx properties.
quick_declare : bool
This is set to True when declaring the jacobian diagonal terms for explicit
components. The checks and conversions are all skipped to improve performance for
cases with large numbers of explicit components or indepvarcomps.
"""
if quick_declare:
abs_key = rel_key2abs_key(self, (of, wrt))
meta = {}
meta['rows'] = np.array(dct['rows'], dtype=INT_DTYPE, copy=False)
meta['cols'] = np.array(dct['cols'], dtype=INT_DTYPE, copy=False)
meta['shape'] = (len(dct['rows']), len(dct['cols']))
meta['value'] = dct['value']
self._subjacs_info[abs_key] = meta
return
val = dct['value'] if 'value' in dct else None
is_scalar = isscalar(val)
dependent = dct['dependent']
if dependent:
if 'rows' in dct and dct['rows'] is not None: # sparse list format
rows = dct['rows']
cols = dct['cols']
rows = np.array(rows, dtype=INT_DTYPE, copy=False)
cols = np.array(cols, dtype=INT_DTYPE, copy=False)
if rows.shape != cols.shape:
raise ValueError('{}: d({})/d({}): rows and cols must have the same shape,'
' rows: {}, cols: {}'.format(self.msginfo, of, wrt,
rows.shape, cols.shape))
if is_scalar:
val = np.full(rows.size, val, dtype=float)
is_scalar = False
elif val is not None:
# np.promote_types will choose the smallest dtype that can contain
# both arguments
val = atleast_1d(val)
safe_dtype = promote_types(val.dtype, float)
val = val.astype(safe_dtype, copy=False)
if rows.shape != val.shape:
raise ValueError('{}: d({})/d({}): If rows and cols are specified, val '
'must be a scalar or have the same shape, val: {}, '
'rows/cols: {}'.format(self.msginfo, of, wrt,
val.shape, rows.shape))
else:
val = np.zeros_like(rows, dtype=float)
if rows.size > 0:
if rows.min() < 0:
msg = '{}: d({})/d({}): row indices must be non-negative'
raise ValueError(msg.format(self.msginfo, of, wrt))
if cols.min() < 0:
msg = '{}: d({})/d({}): col indices must be non-negative'
raise ValueError(msg.format(self.msginfo, of, wrt))
rows_max = rows.max()
cols_max = cols.max()
else:
rows_max = cols_max = 0
else:
if val is not None and not is_scalar and not issparse(val):
val = atleast_2d(val)
val = val.astype(promote_types(val.dtype, float), copy=False)
rows_max = cols_max = 0
rows = None
cols = None
pattern_matches = self._find_partial_matches(of, wrt)
abs2meta_in = self._var_abs2meta['input']
abs2meta_out = self._var_abs2meta['output']
is_array = isinstance(val, ndarray)
patmeta = dict(dct)
patmeta_not_none = {k: v for k, v in dct.items() if v is not None}
for of_bundle, wrt_bundle in product(*pattern_matches):
of_pattern, of_matches = of_bundle
wrt_pattern, wrt_matches = wrt_bundle
if not of_matches:
raise ValueError('{}: No matches were found for of="{}"'.format(self.msginfo,
of_pattern))
if not wrt_matches:
raise ValueError('{}: No matches were found for wrt="{}"'.format(self.msginfo,
wrt_pattern))
for rel_key in product(of_matches, wrt_matches):
abs_key = rel_key2abs_key(self, rel_key)
if not dependent:
if abs_key in self._subjacs_info:
del self._subjacs_info[abs_key]
continue
if abs_key in self._subjacs_info:
meta = self._subjacs_info[abs_key]
meta.update(patmeta_not_none)
else:
meta = patmeta.copy()
of, wrt = abs_key
meta['rows'] = rows
meta['cols'] = cols
csz = abs2meta_in[wrt]['size'] if wrt in abs2meta_in else abs2meta_out[wrt]['size']
meta['shape'] = shape = (abs2meta_out[of]['size'], csz)
if shape[0] == 0 or shape[1] == 0:
msg = "{}: '{}' is an array of size 0"
if shape[0] == 0:
if not abs2meta_out[of]['distributed']:
# non-distributed components are not allowed to have zero size outputs
raise ValueError(msg.format(self.msginfo, of))
else:
# distributed comps are allowed to have zero size outputs on some procs
rows_max = -1
if shape[1] == 0:
if wrt in abs2meta_in:
distrib = abs2meta_in[wrt]['distributed']
else:
distrib = abs2meta_out[wrt]['distributed']
if not distrib:
# non-distributed components are not allowed to have zero size inputs
raise ValueError(msg.format(self.msginfo, wrt))
else:
# distributed comps are allowed to have zero size inputs on some procs
cols_max = -1
if val is None:
# we can only get here if rows is None (we're not sparse list format)
meta['value'] = np.zeros(shape)
from heuslertools.tools.measurement import Measurement
import xrayutilities as xu
import warnings
import numpy as np
class XRDMeasurement(Measurement):
"""Object representing xrd measurement.
Parameters
----------
file : str
path of xrdml file.
Attributes
----------
en : str
Energy of xrays.
wavelength : float
wavelength of xrays.
resol : float
resolution in qz.
"""
def __init__(self, file):
super().__init__(file, "")
self.en = 'CuKa1'
self.wavelength = xu.wavelength('CuKa1')
self.resol = 2e-10 # resolution in qz
def _load_data(self):
self.xrdml = xu.io.XRDMLFile(self.file)
return self.xrdml.scans[0].ddict
def append_measurement(self, file):
xrdml = xu.io.XRDMLFile(file)
data = xrdml.scans[0].ddict
for key in data.keys():
self.data[key] = np.append(self.data[key], data[key])
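# Illustrative usage (a sketch with hypothetical file names, not part of the
# original source):
#
#     meas = XRDMeasurement('scan_001.xrdml')
#     meas.append_measurement('scan_002.xrdml')  # concatenates the data arrays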
import numpy as np
import os
from keras.models import load_model
from src.python.dataset import dataset
CONSENSUS_SUMMARY_CMD_1 = '{}/mummer3.23/dnadiff -p {}/dnadiff-output {} {} ' \
'2>> {}/err'
CONSENSUS_SUMMARY_CMD_2 = 'head -n 24 {}/dnadiff-output.report | tail -n 20'
RESULT_CMD_1 = 'mkdir -p {}'
RESULT_CMD_2 = 'cp {}/dnadiff-output.report {}'
def _convert_predictions_to_genome(predictions):
mapping = {0: 'A', 1: 'C', 2: 'G', 3: 'T', 4: '', 5: 'N'}
genome = [mapping[prediction] for prediction in predictions]
return genome
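# Illustrative example (not part of the original source): indices 0-3 map to
# bases, 4 to a deletion and 5 to an unknown base, so
#     _convert_predictions_to_genome([0, 1, 4, 3]) -> ['A', 'C', '', 'T']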
def _write_genome_to_fasta(contigs, fasta_file_path, contig_names):
with open(fasta_file_path, 'w') as f:
for contig, contig_name in zip(contigs, contig_names):
contig = ''.join(contig)
f.write('>{} LN:{}\n'.format(contig_name, len(contig)))
f.write('{}\n'.format(contig))
def make_consensus(model_path, reference_path, pileup_generator,
neighbourhood_size, output_dir, tools_dir, result_file_path):
"""
Creates consensus which is polished by trained neural network.
Pileup generator creates pileups by using it's own pileup strategy. Those
pileups are used to create dataset (examples with given neighbourhood
size). Given model makes predictions (contigs) for created dataset. Those
contigs are concatenated the same way as in the .sam file. Finally, the
polished genome is compared with the reference.
:param model_path: Path to trained model.
:type model_path: str
:param reference_path: Path to reference.
:type reference_path: str
:param pileup_generator: Pileup Generator object which creates pileups
using it's own strategy.
:type pileup_generator: PileupGenerator
:param neighbourhood_size: Number of neighbours to use from one size (eg.
if you set this parameter to 3, it will take 3 neighbours from both
sides so total number of positions in one example will be 7 -
counting the middle position).
:type neighbourhood_size: int
:param output_dir: Path to the output directory where all outputs will be
saved.
:type output_dir: str
:param tools_dir: Path to directory where are used tools are installed.
:type tools_dir: str
:param result_file_path: Path where will results be copied to.
:type result_file_path: str
"""
# TODO(ajuric): Currently, y is also created while calculating consensus,
# due to reusing existing code from training. But here, in inference, y
# is not used. This needs to be removed to reduce the unnecessary overhead.
if os.path.exists(output_dir):
raise FileExistsError('Given directory already exists: {}! Provide a '
'non-existing directory.'.format(output_dir))
os.makedirs(output_dir)
print('----> Create pileups from assembly. <----')
X, y, contig_names = \
pileup_generator.generate_pileups()
print('----> Create dataset with neighbourhood from pileups. <----')
X, y = dataset.create_dataset_with_neighbourhood(
neighbourhood_size,
mode='inference',
X_list=X,
y_list=y)
print('----> Load model and make predictions (consensus). <----')
model = load_model(model_path)
contigs = list()
for X_contig, _, contig_name in zip(X, y, contig_names):
probabilities = model.predict(X_contig)
predictions = np.argmax(probabilities, axis=1)
contig = _convert_predictions_to_genome(predictions)
contigs.append(contig)
consensus_path = os.path.join(output_dir, 'consensus.fasta')
_write_genome_to_fasta(contigs, consensus_path, contig_names)
print('----> Create consensus summary. <----')
os.system(CONSENSUS_SUMMARY_CMD_1.format(tools_dir, output_dir,
reference_path,
consensus_path, output_dir))
os.system(CONSENSUS_SUMMARY_CMD_2.format(output_dir))
result_directory = os.path.dirname(result_file_path)
os.system(RESULT_CMD_1.format(result_directory))
os.system(RESULT_CMD_2.format(output_dir, result_file_path))
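# Illustrative call (a sketch with hypothetical paths and generator, not part
# of the original source):
#
#     make_consensus('model.h5', 'reference.fasta', pileup_generator,
#                    neighbourhood_size=3, output_dir='./consensus-out',
#                    tools_dir='./tools', result_file_path='./results/report')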
# @TODO(ajuric): Refactor these consensus methods.
def make_consensus_before_shapeing_tmp(X_path, y_path, model_path,
output_dir, tools_dir,
reference_path, contig):
print('----> Reshape dataset for convolutional network. <----')
X, y = dataset.read_dataset_and_reshape_for_conv(X_path, y_path)
print('----> Load model and make predictions (consensus). <----')
model = load_model(model_path)
probabilities = model.predict(X)
predictions = np.argmax(probabilities, axis=1)
import numpy as np
from ..boundingregion import BoundingBox
from ..dimension import Dimension
from ..element import Element
from ..ndmapping import NdMapping, item_check
from ..sheetcoords import Slice
from .. import util
from .grid import GridInterface
from .interface import Interface
class ImageInterface(GridInterface):
"""
Interface for 2 or 3D arrays representing images
of raw luminance values, RGB values or HSV values.
"""
types = (np.ndarray,)
datatype = 'image'
@classmethod
def init(cls, eltype, data, kdims, vdims):
if kdims is None:
kdims = eltype.kdims
if vdims is None:
vdims = eltype.vdims
kwargs = {}
dimensions = [d.name if isinstance(d, Dimension) else
d for d in kdims + vdims]
if isinstance(data, tuple):
data = dict(zip(dimensions, data))
if isinstance(data, dict):
xs, ys = np.asarray(data[kdims[0].name]), np.asarray(data[kdims[1].name])
l, r, xdensity, invertx = util.bound_range(xs, None, eltype._time_unit)
b, t, ydensity, inverty = util.bound_range(ys, None, eltype._time_unit)
kwargs['bounds'] = BoundingBox(points=((l, b), (r, t)))
if len(vdims) == 1:
data = np.flipud(np.asarray(data[vdims[0].name]))
else:
data = np.dstack([np.flipud(data[vd.name]) for vd in vdims])
if invertx:
data = data[:, ::-1]
if inverty:
data = data[::-1, :]
if not isinstance(data, np.ndarray) or data.ndim not in [2, 3]:
raise ValueError('ImageInterface expects a 2D or 3D array.')
return data, {'kdims':kdims, 'vdims':vdims}, kwargs
@classmethod
def shape(cls, dataset, gridded=False):
if gridded:
return dataset.data.shape
else:
return cls.length(dataset), len(dataset.dimensions())
@classmethod
def length(cls, dataset):
return np.prod(dataset.data.shape)
@classmethod
def validate(cls, dataset, vdims=True):
pass
@classmethod
def redim(cls, dataset, dimensions):
return dataset.data
@classmethod
def reindex(cls, dataset, kdims=None, vdims=None):
data = dataset.data
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
if dropped_kdims or constant:
return tuple(dataset.columns(kdims+vdims).values())
if vdims is not None and vdims != dataset.vdims and len(dataset.vdims) > 1:
inds = [dataset.get_dimension_index(vd)-dataset.ndims for vd in vdims]
return data[..., inds] if len(inds) > 1 else data[..., inds[0]]
return data
@classmethod
def coords(cls, dataset, dim, ordered=False, expanded=False):
dim = dataset.get_dimension(dim, strict=True)
if expanded:
return util.expand_grid_coords(dataset, dim)
return cls.values(dataset, dim, expanded=False)
@classmethod
def range(cls, obj, dim):
dim_idx = obj.get_dimension_index(dim)
if dim_idx in [0, 1] and obj.bounds:
l, b, r, t = obj.bounds.lbrt()
if dim_idx:
(low, high) = (b, t)
density = obj.ydensity
else:
low, high = (l, r)
density = obj.xdensity
halfd = (1./density)/2.
if isinstance(low, util.datetime_types):
halfd = np.timedelta64(int(round(halfd)), obj._time_unit)
drange = (low+halfd, high-halfd)
elif 1 < dim_idx < len(obj.vdims) + 2:
dim_idx -= 2
data = np.atleast_3d(obj.data)[:, :, dim_idx]
drange = (np.nanmin(data), np.nanmax(data))
else:
drange = (None, None)
return drange
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True):
"""
The set of samples available along a particular dimension.
"""
dim_idx = dataset.get_dimension_index(dim)
if dim_idx in [0, 1]:
l, b, r, t = dataset.bounds.lbrt()
dim2, dim1 = dataset.data.shape[:2]
if isinstance(l, util.datetime_types):
xlin = util.date_range(l, r, dim1, dataset._time_unit)
else:
xstep = float(r - l)/dim1
xlin = np.linspace(l+(xstep/2.), r-(xstep/2.), dim1)
if isinstance(b, util.datetime_types):
ylin = util.date_range(b, t, dim2, dataset._time_unit)
else:
ystep = float(t - b)/dim2
ylin = np.linspace(b+(ystep/2.), t-(ystep/2.), dim2)
if expanded:
values = np.meshgrid(ylin, xlin)[abs(dim_idx-1)]
return values.flatten() if flat else values
else:
return ylin if dim_idx else xlin
elif dataset.ndims <= dim_idx < len(dataset.dimensions()):
# Raster arrays are stored with different orientation
# than expanded column format, reorient before expanding
if dataset.data.ndim > 2:
data = dataset.data[:, :, dim_idx-dataset.ndims]
else:
data = dataset.data
data = np.flipud(data)
return data.T.flatten() if flat else data
else:
return None
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
"""
Slice the underlying numpy array in sheet coordinates.
"""
selection = {k: slice(*sel) if isinstance(sel, tuple) else sel
for k, sel in selection.items()}
coords = tuple(selection[kd.name] if kd.name in selection else slice(None)
for kd in dataset.kdims)
if not any([isinstance(el, slice) for el in coords]):
return dataset.data[dataset.sheet2matrixidx(*coords)]
# Apply slices
xidx, yidx = coords
l, b, r, t = dataset.bounds.lbrt()
if isinstance(xidx, slice):
l = l if xidx.start is None else max(l, xidx.start)
r = r if xidx.stop is None else min(r, xidx.stop)
if isinstance(yidx, slice):
b = b if yidx.start is None else max(b, yidx.start)
t = t if yidx.stop is None else min(t, yidx.stop)
bounds = BoundingBox(points=((l, b), (r, t)))
slc = Slice(bounds, dataset)
return slc.submatrix(dataset.data)
@classmethod
def sample(cls, dataset, samples=[]):
"""
Sample the Raster along one or both of its dimensions,
returning a reduced dimensionality type, which is either
a ItemTable, Curve or Scatter. If two dimension samples
and a new_xaxis is provided the sample will be the value
of the sampled unit indexed by the value in the new_xaxis
tuple.
"""
if len(samples[0]) == 1:
select = {dataset.kdims[0].name: [s[0] for s in samples]}
return tuple(dataset.select(**select).columns().values())
return [c+(dataset.data[dataset._coord2matrix(c)],) for c in samples]
@classmethod
def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
# Get dimensions information
dimensions = [dataset.get_dimension(d) for d in dim_names]
kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
# Update the kwargs appropriately for Element group types
group_kwargs = {}
group_type = dict if group_type == 'raw' else group_type
if issubclass(group_type, Element):
group_kwargs.update(util.get_param_values(dataset))
group_kwargs['kdims'] = kdims
group_kwargs.update(kwargs)
if len(dimensions) == 1:
didx = dataset.get_dimension_index(dimensions[0])
coords = dataset.dimension_values(dimensions[0], expanded=False)
xvals = dataset.dimension_values(abs(didx-1), expanded=False)
samples = [(i, slice(None)) if didx else (slice(None), i)
for i in range(dataset.data.shape[abs(didx-1)])]
data = np.flipud(dataset.data)
groups = [(c, group_type((xvals, data[s]), **group_kwargs))
for s, c in zip(samples, coords)]
else:
data = zip(*[dataset.dimension_values(i) for i in range(len(dataset.dimensions()))])
groups = [(g[:dataset.ndims], group_type([g[dataset.ndims:]], **group_kwargs))
for g in data]
if issubclass(container_type, NdMapping):
with item_check(False):
return container_type(groups, kdims=dimensions)
else:
return container_type(groups)
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if np.isscalar(data):
return data
#!/usr/bin/env python
import rospy
import numpy as np
import tf
import tf.msg
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from geometry_msgs.msg import PoseStamped, Quaternion, AccelStamped, TwistStamped, TransformStamped, PointStamped, Point
from visualization_msgs.msg import Marker
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from auto_messages.msg import from_autobox
from std_msgs.msg import ColorRGBA
from safe_traffic_weaving.msg import XYThV
from utils.math_utils import *
class Position:
def __init__(self, x=[0, 0, 0, 0, 0, 0]):
self.x = x[0]
self.y = x[1]
self.xd = x[2]
self.yd = x[3]
self.xdd = x[4]
self.ydd = x[5]
def to_list(self):
return [self.x, self.y, self.xd, self.yd, self.xdd, self.ydd]
class Orientation:
def __init__(self, w=[0, 0, 0, 1], dtheta = 0, dtheta_prev = 0):
self.w0 = w[0]
self.w1 = w[1]
self.w2 = w[2]
self.w3 = w[3]
self.dtheta = dtheta
self.dtheta_prev = dtheta_prev
def to_list(self):
return [self.w0, self.w1, self.w2, self.w3]
class State:
def __init__(self, pos = Position(), orien = Orientation(), frame=None):
self.position = pos
self.orientation = orien
self.frame = frame
class x1lidar:
def __init__(self, intensity_tol=100):
self.intensity_tol = intensity_tol
self.tf_broadcaster = tf.TransformBroadcaster()
self.tf_listener = tf.TransformListener()
rospy.init_node("visualize_lidar", anonymous=True)
# rospy.Subscriber("from_veh", fromVehicle, self.egocar_callback)
# in world frame
self.trackedObject_pose_pub = rospy.Publisher("/tracked_object/pose", PoseStamped, queue_size=10)
self.trackedObject_vel_pub = rospy.Publisher("/tracked_object/vel", TwistStamped, queue_size=10)
self.trackedObject_accel_pub = rospy.Publisher("/tracked_object/acc", AccelStamped, queue_size=10)
self.trackedObject_xythv_pub = rospy.Publisher("/tracked_object/xythv", XYThV, queue_size=10)
self.relevant_pc = rospy.Publisher("/tracked_object/pc_viz", Marker, queue_size=10)
self.ellipse = rospy.Publisher("/tracked_object/ellipse_viz", Marker, queue_size=10)
# in world frame
# self.x1_marker_pub = rospy.Publisher("/x1/marker", Marker, queue_size=10)
rospy.Subscriber("/x1/pose", PoseStamped, self.x1_position)
rospy.Subscriber("/move_base_simple/goal", PoseStamped, self.initialize_trackedObject)
# rospy.Subscriber("/M_HDL32/velodyne_points", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
rospy.Subscriber("/M_intensity_filter/output", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
# rospy.Subscriber("/FL_VLP16/velodyne_points", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
rospy.Subscriber("/FL_intensity_filter/output", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
# rospy.Subscriber("/FR_VLP16/velodyne_points", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
rospy.Subscriber("/FR_intensity_filter/output", PointCloud2, self.pc2_callback, queue_size=1, buff_size=2**24)
# pc array marker
self.pc_marker = Marker()
self.pc_marker.ns = "considered_pc"
self.pc_marker.type = Marker.POINTS
self.pc_marker.scale.x = 0.05
self.pc_marker.scale.y = 0.05
self.pc_marker.frame_locked = True
# ellipse array marker
self.ellipse_marker = Marker()
self.ellipse_marker.header.frame_id = "/world"
self.ellipse_marker.ns = "ellipse"
self.ellipse_marker.type = Marker.LINE_STRIP
self.ellipse_marker.scale.x = 0.01
self.ellipse_marker.frame_locked = True
self.ellipse_marker.color.g = 1
self.ellipse_marker.color.a = 1
# in world frame
self.trackedObjectState = None
self.x1State = None
self.initialize_flag = False
self.x1_frame_init = False
self.dt = None
self.prev_time = 0 # to be overwritten
self.curr_time = 0.1 # to be overwritten
self.lost_count = 0
self.processing = False
def pose_twist_accelStamped_pub(self, state, pose_pub, vel_pub, accel_pub, xythv_pub, header_frame_id, timestamp):
self.tf_broadcaster.sendTransform((state.position.x, state.position.y, 0),
state.orientation.to_list(),
timestamp,
"tracked_object",
header_frame_id)
pose_msg = PoseStamped()
pose_msg.header.frame_id = header_frame_id
pose_msg.header.stamp = timestamp
pose_msg.pose.position.x = state.position.x
pose_msg.pose.position.y = state.position.y
pose_msg.pose.orientation = Quaternion(*state.orientation.to_list())
pose_pub.publish(pose_msg)
vel_msg = TwistStamped()
vel_msg.header.frame_id = header_frame_id
vel_msg.header.stamp = timestamp
vel_msg.twist.linear.x = state.position.xd
vel_msg.twist.linear.y = state.position.yd
vel_msg.twist.angular.z = state.orientation.dtheta
vel_pub.publish(vel_msg)
accel_msg = AccelStamped()
accel_msg.header.frame_id = header_frame_id
accel_msg.header.stamp = timestamp
accel_msg.accel.linear.x = state.position.xdd
accel_msg.accel.linear.y = state.position.ydd
accel_pub.publish(accel_msg)
xythv_msg = XYThV()
xythv_msg.x = state.position.x
xythv_msg.y = state.position.y
xythv_msg.th = np.arctan2(state.position.yd, state.position.xd)
xythv_msg.v = np.hypot(state.position.xd, state.position.yd)
xythv_pub.publish(xythv_msg)
def x1_position(self, msg):
# PoseStamped msg
self.x1State = msg
def initialize_trackedObject(self, msg):
# transform nav_goal message to the world frame
msg_time = msg.header.stamp
msg_frame = msg.header.frame_id
self.prev_time = msg.header.stamp.to_time()
try:
self.tf_listener.waitForTransform("/world", msg_frame, msg_time, rospy.Duration(.5))
pose_world = self.tf_listener.transformPose("/world", msg)
except:
rospy.logwarn("Could not transform from vehicle base to World coordinates")
return
ori = Orientation([pose_world.pose.orientation.x,
pose_world.pose.orientation.y,
pose_world.pose.orientation.z,
pose_world.pose.orientation.w])
th = euler_from_quaternion(ori.to_list())
pos = Position([pose_world.pose.position.x, pose_world.pose.position.y, np.cos(th[2]), np.sin(th[2]), 0, 0])
self.trackedObjectState = State(pos, ori, '/world')
# this publishes the transform, and this will enable the viz_vehicle file to plot the tracked_object
self.pose_twist_accelStamped_pub(self.trackedObjectState,
self.trackedObject_pose_pub,
self.trackedObject_vel_pub,
self.trackedObject_accel_pub,
self.trackedObject_xythv_pub,
'/world',
msg_time)
# flag to say that it has been initialized.
self.initialize_flag = True
# self.ellipse_marker.header.frame_id = msg_frame
# self.ellipse_marker.points = []
# center = np.array([[pos.x], [pos.y]])
# ellipse_points = self.get_ellipse(3) + center
# for i in range(ellipse_points.shape[-1]):
# self.ellipse_marker.points.append(Point(ellipse_points[0,i], ellipse_points[1,i], 0))
# self.ellipse.publish(self.ellipse_marker)
self.initialize_EKF()
rospy.loginfo("Initialized the marker!")
initial_position = Position((self.x1State.pose.position.x, self.x1State.pose.position.y, 0, 0, 0, 0))
initial_orientation = Orientation((self.x1State.pose.orientation.x, self.x1State.pose.orientation.y, self.x1State.pose.orientation.z, self.x1State.pose.orientation.w))
self.initial_state = State(initial_position, initial_orientation, '/world')
# make local frame. The frame where x1 is originally.
self.tf_broadcaster.sendTransform((self.x1State.pose.position.x, self.x1State.pose.position.y, 0),
initial_orientation.to_list(),
self.x1State.header.stamp,
'/local',
'/world')
def get_ellipse(self, sd_tol, var):
L = np.linalg.cholesky(var)
LHinv = np.linalg.inv(L.T.conj())
th = np.arange(0, 2*np.pi+1, 0.1)
y1 = sd_tol * np.cos(th)
y2 = sd_tol * np.sin(th)
y = np.stack([y1,y2])
return np.dot(LHinv, y)
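# Illustrative example (not part of the original source): with an identity
# covariance, get_ellipse(3.0, np.eye(2)) returns a 2 x N array of points on
# a circle of radius 3 (the 3-sigma contour); a general covariance shears
# that circle into the corresponding confidence ellipse.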
# tracking the other car
def pc2_callback(self, msg, min_points = 2, vel_tol = 0.1, alpha = 0.95, sd = 3.0):
intensity_max = 500
if self.processing:
rospy.logwarn("Callback is busy")
return
rospy.loginfo("Receiving points from velodyne %s", msg.header.frame_id)
msg_time = msg.header.stamp
msg_frame = msg.header.frame_id
self.pc_marker.header.frame_id = msg_frame
self.pc_marker.points = []
self.pc_marker.colors = []
self.ellipse_marker.points = []
self.curr_time = msg_time.to_time()
if self.dt:
dt = self.curr_time - self.prev_time
else:
dt = 0.1
if dt < 0:
rospy.logwarn("Message is too old")
self.processing = False
return
else:
self.dt = dt
self.prev_time = msg_time.to_time()
if self.initialize_flag:
self.processing = True
self.tf_broadcaster.sendTransform((self.initial_state.position.x, self.initial_state.position.y, 0.0),
self.initial_state.orientation.to_list(),
msg_time,
'/local',
'/world')
rospy.loginfo("Got velodyne points, getting x, y, z....")
lidar_info = pc2.read_points(msg, skip_nans=True, field_names=("x", "y", "intensity"))
x_list, y_list, intensities = [], [], []
# in world frame
xy_var = self.var[:2,:2]
inv_xy_var = np.linalg.inv(xy_var)
pt = PointStamped()
pt.header.frame_id = '/world'
pt.header.stamp = msg_time
pt.point.x = self.trackedObjectState.position.x
pt.point.y = self.trackedObjectState.position.y
distance = np.hypot(self.trackedObjectState.position.x - self.x1State.pose.position.x,
self.trackedObjectState.position.y - self.x1State.pose.position.y)
sd_tol = 2*sd if distance > 5 else sd
# trackedObjectPoints_velodyne = self.tf_listener.transformPoint("/velodyne", pt)
try:
self.tf_listener.waitForTransform(msg_frame, "/world", msg_time, rospy.Duration(.05))
trackedObjectPoints_velodyne = self.tf_listener.transformPoint(msg_frame, pt)
(trans, rot) = self.tf_listener.lookupTransform('/world', msg_frame, msg_time)
th = euler_from_quaternion(rot)
R = rot_mat(th[2])
# variance for velodyne points in the velodyne frame but want to compute it in the world frame
inv_var_rot = np.matmul(np.matmul(R, inv_xy_var),R.T)
except:
rospy.logwarn("Could not transform from World to Velodyne coordinates")
self.processing = False
return
center = np.array([[self.trackedObjectState.position.x], [self.trackedObjectState.position.y]])
ellipse_points = self.get_ellipse(sd_tol, inv_xy_var) + center
for i in range(ellipse_points.shape[-1]):
self.ellipse_marker.points.append(Point(ellipse_points[0,i], ellipse_points[1,i], 0))
self.ellipse.publish(self.ellipse_marker)
# trackedObjectPoints_velodyne = self.tf_listener.transformPoint(msg_frame, pt)
for p in lidar_info:
if p[2] > self.intensity_tol:
xy = np.array([[p[0] - trackedObjectPoints_velodyne.point.x], [p[1] - trackedObjectPoints_velodyne.point.y]])
# Mahalanobis distance
dist = np.matmul(np.matmul(xy.T, inv_var_rot), xy)
# xy[0]**2*inv_var_rot[0,0] + xy[1]**2*inv_var_rot[1,1] + 2*xy[0]*xy[1]*inv_var_rot[1,0]
# rospy.logwarn("intensity tolerance met")
if dist < sd_tol**2:
intensity_frac = min(p[2] / intensity_max, 1)  # cap the fraction at 1 so the color stays valid
self.pc_marker.colors.append(ColorRGBA(1 - intensity_frac, intensity_frac, 0.0, 1.0))
self.pc_marker.points.append(Point(p[0], p[1], 0))
x_list.append(p[0])
y_list.append(p[1])
self.relevant_pc.publish(self.pc_marker)
if len(x_list) < min_points:
rospy.logwarn("%s did not receive points", msg.header.frame_id)
self.lost_count += 1
if self.lost_count > 3:
rospy.logwarn("Robot is lost in %s", msg.header.frame_id)
# self.processing = False
obs = None
else:
self.lost_count = 0
obs_velodyne = np.array([np.mean(x_list), np.mean(y_list)])
obs_velodyne_point = PointStamped()
obs_velodyne_point.header.frame_id = msg_frame
obs_velodyne_point.header.stamp = msg_time
obs_velodyne_point.point.x = obs_velodyne[0]
obs_velodyne_point.point.y = obs_velodyne[1]
# obs_world_point = self.tf_listener.transformPoint("/world", obs_velodyne_point)
try:
self.tf_listener.waitForTransform("/world", msg_frame, msg_time, rospy.Duration(.05))
obs_world_point = self.tf_listener.transformPoint("/world", obs_velodyne_point)
except:
rospy.logwarn("Could not transform from Velodyne to World coordinates")
self.processing = False
return
# obs_world_point = self.tf_listener.transformPoint("/world", obs_velodyne_point)
obs = np.array([obs_world_point.point.x, obs_world_point.point.y])
from functools import partial
import numpy as np
from violajones.HaarLikeFeature import HaarLikeFeature
from violajones.HaarLikeFeature import FeatureTypes
import progressbar
from multiprocessing import Pool
LOADING_BAR_LENGTH = 50
# TODO: select optimal threshold for each feature
# TODO: attentional cascading
def learn(positive_iis, negative_iis, num_classifiers=-1, min_feature_width=1, max_feature_width=-1, min_feature_height=1, max_feature_height=-1):
"""
Selects a set of classifiers. Iteratively takes the best classifiers based
on a weighted error.
:param positive_iis: List of positive integral image examples
:type positive_iis: list[numpy.ndarray]
:param negative_iis: List of negative integral image examples
:type negative_iis: list[numpy.ndarray]
    :param num_classifiers: Number of classifiers to select, -1 will use all
    classifiers
    :type num_classifiers: int
    :param min_feature_width: Minimum width of the Haar-like features, in pixels
    :type min_feature_width: int
    :param max_feature_width: Maximum width of the Haar-like features, -1 uses the image width
    :type max_feature_width: int
    :param min_feature_height: Minimum height of the Haar-like features, in pixels
    :type min_feature_height: int
    :param max_feature_height: Maximum height of the Haar-like features, -1 uses the image height
    :type max_feature_height: int
:return: List of selected features
:rtype: list[violajones.HaarLikeFeature.HaarLikeFeature]
"""
num_pos = len(positive_iis)
num_neg = len(negative_iis)
num_imgs = num_pos + num_neg
img_height, img_width = positive_iis[0].shape
# Maximum feature width and height default to image width and height
max_feature_height = img_height if max_feature_height == -1 else max_feature_height
max_feature_width = img_width if max_feature_width == -1 else max_feature_width
# Create initial weights and labels
pos_weights = np.ones(num_pos) * 1. / (2 * num_pos)
neg_weights = np.ones(num_neg) * 1. / (2 * num_neg)
    weights = np.hstack((pos_weights, neg_weights))
import numpy as np # pip3 install numpy
import scipy # pip3 install scipy
import scipy.ndimage as snd
import reikna.fft, reikna.cluda # pip3 install pyopencl/pycuda, reikna
from PIL import Image, ImageTk, ImageDraw # pip3 install pillow
try: import tkinter as tk
except: import Tkinter as tk
from fractions import Fraction
import copy, re, itertools, json, csv
import os, sys, subprocess, datetime, time
import warnings
warnings.filterwarnings('ignore', '.*output shape of zoom.*') # suppress warning from snd.zoom()
P2, PIXEL_BORDER = 0,0 # 4,2 3,1 2,1 0,0
X2, Y2 = 9,9 # 10,9 9,8 8,8 1<<9=512
PIXEL = 1 << P2; SIZEX, SIZEY = 1 << (X2-P2), 1 << (Y2-P2)
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1280//PIXEL, 720//PIXEL # 720p HD
# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1920//PIXEL, 1080//PIXEL # 1080p HD
MIDX, MIDY = int(SIZEX / 2), int(SIZEY / 2)
DEF_R = max(min(SIZEX, SIZEY) // 4 //5*5, 13)
EPSILON = 1e-10
ROUND = 10
FPS_FREQ = 20
STATUS = []
is_windows = (os.name == 'nt')
class Board:
def __init__(self, size=[0,0]):
self.names = ['', '', '']
self.params = {'R':DEF_R, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}
self.cells = np.zeros(size)
@classmethod
def from_values(cls, names, params, cells):
self = cls()
self.names = names.copy() if names is not None else None
self.params = params.copy() if params is not None else None
self.cells = cells.copy() if cells is not None else None
return self
@classmethod
def from_data(cls, data):
self = cls()
self.names = [data.get('code',''), data.get('name',''), data.get('cname','')]
self.params = data.get('params')
if self.params:
self.params = self.params.copy()
self.params['b'] = Board.st2fracs(self.params['b'])
self.cells = data.get('cells')
if self.cells:
if type(self.cells) in [tuple, list]:
self.cells = ''.join(self.cells)
self.cells = Board.rle2arr(self.cells)
return self
def to_data(self, is_shorten=True):
rle_st = Board.arr2rle(self.cells, is_shorten)
params2 = self.params.copy()
params2['b'] = Board.fracs2st(params2['b'])
data = {'code':self.names[0], 'name':self.names[1], 'cname':self.names[2], 'params':params2, 'cells':rle_st}
return data
def params2st(self):
params2 = self.params.copy()
params2['b'] = '[' + Board.fracs2st(params2['b']) + ']'
return ','.join(['{}={}'.format(k,str(v)) for (k,v) in params2.items()])
def long_name(self):
# return ' | '.join(filter(None, self.names))
return '{0} - {1} {2}'.format(*self.names)
@staticmethod
def arr2rle(A, is_shorten=True):
''' RLE = Run-length encoding:
http://www.conwaylife.com/w/index.php?title=Run_Length_Encoded
http://golly.sourceforge.net/Help/formats.html#rle
https://www.rosettacode.org/wiki/Run-length_encoding#Python
0=b=. 1=o=A 1-24=A-X 25-48=pA-pX 49-72=qA-qX 241-255=yA-yO '''
V = np.rint(A*255).astype(int).tolist() # [[255 255] [255 0]]
code_arr = [ [' .' if v==0 else ' '+chr(ord('A')+v-1) if v<25 else chr(ord('p')+(v-25)//24) + chr(ord('A')+(v-25)%24) for v in row] for row in V] # [[yO yO] [yO .]]
if is_shorten:
rle_groups = [ [(len(list(g)),c.strip()) for c,g in itertools.groupby(row)] for row in code_arr] # [[(2 yO)] [(1 yO) (1 .)]]
for row in rle_groups:
if row[-1][1]=='.': row.pop() # [[(2 yO)] [(1 yO)]]
st = '$'.join(''.join([(str(n) if n>1 else '')+c for n,c in row]) for row in rle_groups) + '!' # "2 yO $ 1 yO"
else:
st = '$'.join(''.join(row) for row in code_arr) + '!'
# print(sum(sum(r) for r in V))
return st
@staticmethod
def rle2arr(st):
		rle_groups = re.findall(r'(\d*)([p-y]?[.boA-X$])', st.rstrip('!'))  # [(2 yO)(1 $)(1 yO)]
code_list = sum([[c] * (1 if n=='' else int(n)) for n,c in rle_groups], []) # [yO yO $ yO]
code_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]
V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row if c!='' ] for row in code_arr] # [[255 255] [255]]
# lines = st.rstrip('!').split('$')
# rle = [re.findall('(\d*)([p-y]?[.boA-X])', row) for row in lines]
# code = [ sum([[c] * (1 if n=='' else int(n)) for n,c in row], []) for row in rle]
# V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row ] for row in code]
maxlen = len(max(V, key=len))
A = np.array([row + [0] * (maxlen - len(row)) for row in V])/255 # [[1 1] [1 0]]
# print(sum(sum(r) for r in V))
return A
@staticmethod
def fracs2st(B):
return ','.join([str(f) for f in B])
@staticmethod
def st2fracs(st):
return [Fraction(st) for st in st.split(',')]
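	# e.g. (sanity check) Board.st2fracs('1,2/3') -> [Fraction(1, 1), Fraction(2, 3)],
	# and Board.fracs2st maps it back to '1,2/3'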
def clear(self):
self.cells.fill(0)
def add(self, part, shift=[0,0]):
# assert self.params['R'] == part.params['R']
h1, w1 = self.cells.shape
h2, w2 = part.cells.shape
h, w = min(h1, h2), min(w1, w2)
i1, j1 = (w1 - w)//2 + shift[1], (h1 - h)//2 + shift[0]
i2, j2 = (w2 - w)//2, (h2 - h)//2
# self.cells[j:j+h, i:i+w] = part.cells[0:h, 0:w]
vmin = np.amin(part.cells)
for y in range(h):
for x in range(w):
if part.cells[j2+y, i2+x] > vmin:
self.cells[(j1+y)%h1, (i1+x)%w1] = part.cells[j2+y, i2+x]
return self
def transform(self, tx, mode='RZSF', is_world=False):
if 'R' in mode and tx['rotate'] != 0:
self.cells = snd.rotate(self.cells, tx['rotate'], reshape=not is_world, order=0, mode='wrap' if is_world else 'constant')
if 'Z' in mode and tx['R'] != self.params['R']:
# print('* {} / {}'.format(tx['R'], self.params['R']))
shape_orig = self.cells.shape
self.cells = snd.zoom(self.cells, tx['R'] / self.params['R'], order=0)
if is_world:
self.cells = Board(shape_orig).add(self).cells
self.params['R'] = tx['R']
if 'F' in mode and tx['flip'] != -1:
if tx['flip'] in [0,1]: self.cells = np.flip(self.cells, axis=tx['flip'])
elif tx['flip'] == 2: self.cells[:, :-MIDX-1:-1] = self.cells[:, :MIDX]
elif tx['flip'] == 3: self.cells[:, :-MIDX-1:-1] = self.cells[::-1, :MIDX]
if 'S' in mode and tx['shift'] != [0, 0]:
self.cells = snd.shift(self.cells, tx['shift'], order=0, mode='wrap')
# self.cells = np.roll(self.cells, tx['shift'], (1, 0))
return self
def add_transformed(self, part, tx):
part = copy.deepcopy(part)
self.add(part.transform(tx, mode='RZF'), tx['shift'])
return self
def crop(self):
vmin = np.amin(self.cells)
coords = np.argwhere(self.cells > vmin)
y0, x0 = coords.min(axis=0)
y1, x1 = coords.max(axis=0) + 1
self.cells = self.cells[y0:y1, x0:x1]
return self
class Automaton:
kernel_core = {
0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)
1: lambda r: np.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)
2: lambda r, q=1/4: (r>=q)*(r<=1-q), # step (stpz1/4)
3: lambda r, q=1/4: (r>=q)*(r<=1-q) + (r<q)*0.5 # staircase (life)
}
field_func = {
0: lambda n, m, s: np.maximum(0, 1 - (n-m)**2 / (9 * s**2) )**4 * 2 - 1, # polynomial (quad4)
1: lambda n, m, s: np.exp( - (n-m)**2 / (2 * s**2) ) * 2 - 1, # exponential / gaussian (gaus)
2: lambda n, m, s: (np.abs(n-m)<=s) * 2 - 1 # step (stpz)
}
def __init__(self, world):
self.world = world
self.world_FFT = np.zeros(world.cells.shape)
self.potential_FFT = np.zeros(world.cells.shape)
self.potential = np.zeros(world.cells.shape)
self.field = np.zeros(world.cells.shape)
self.field_old = None
self.change = np.zeros(world.cells.shape)
self.X = None
self.Y = None
self.D = None
self.gen = 0
self.time = 0
self.is_multi_step = False
self.is_soft_clip = False
self.is_inverted = False
self.kn = 1
self.gn = 1
self.is_gpu = True
self.has_gpu = True
self.compile_gpu(self.world.cells)
self.calc_kernel()
def kernel_shell(self, r):
k = len(self.world.params['b'])
kr = k * r
bs = np.array([float(f) for f in self.world.params['b']])
b = bs[np.minimum(np.floor(kr).astype(int), k-1)]
kfunc = Automaton.kernel_core[(self.world.params.get('kn') or self.kn) - 1]
return (r<1) * kfunc(np.minimum(kr % 1, 1)) * b
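	# e.g. with params['b'] = [1, 1/2], kernel_shell produces two concentric
	# rings over r in [0, 1), the outer ring's peak scaled to half the inner's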
@staticmethod
def soft_max(x, m, k):
''' Soft maximum: https://www.johndcook.com/blog/2010/01/13/soft-maximum/ '''
return np.log(np.exp(k*x) + np.exp(k*m)) / k
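	# e.g. soft_max(0.0, 1.0, k=10) ~= 1.0000045; it approaches max(x, m) as k grows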
@staticmethod
def soft_clip(x, min, max, k):
a = np.exp(k*x)
b = np.exp(k*min)
c = np.exp(-k*max)
return np.log( 1/(a+b)+c ) / -k
# return Automaton.soft_max(Automaton.soft_max(x, min, k), max, -k)
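	# quick sanity check (approximate, since the clip is soft):
	#   Automaton.soft_clip(np.array([-5., 0.5, 5.]), 0, 1, 10) -> roughly [0., 0.5, 1.]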
def compile_gpu(self, A):
''' Reikna: http://reikna.publicfields.net/en/latest/api/computations.html '''
self.gpu_api = self.gpu_thr = self.gpu_fft = self.gpu_fftshift = None
try:
self.gpu_api = reikna.cluda.any_api()
self.gpu_thr = self.gpu_api.Thread.create()
self.gpu_fft = reikna.fft.FFT(A.astype(np.complex64)).compile(self.gpu_thr)
self.gpu_fftshift = reikna.fft.FFTShift(A.astype(np.float32)).compile(self.gpu_thr)
except Exception as exc:
# if str(exc) == "No supported GPGPU APIs found":
self.has_gpu = False
self.is_gpu = False
print(exc)
# raise exc
def run_gpu(self, A, cpu_func, gpu_func, dtype, **kwargs):
if self.is_gpu and self.gpu_thr and gpu_func:
op_dev = self.gpu_thr.to_device(A.astype(dtype))
gpu_func(op_dev, op_dev, **kwargs)
return op_dev.get()
else:
return cpu_func(A)
# return np.roll(potential_shifted, (MIDX, MIDY), (1, 0))
def fft(self, A): return self.run_gpu(A, np.fft.fft2, self.gpu_fft, np.complex64)
def ifft(self, A): return self.run_gpu(A, np.fft.ifft2, self.gpu_fft, np.complex64, inverse=True)
def fftshift(self, A): return self.run_gpu(A, np.fft.fftshift, self.gpu_fftshift, np.float32)
def calc_once(self):
A = self.world.cells
self.world_FFT = self.fft(A)
self.potential_FFT = self.kernel_FFT * self.world_FFT
self.potential = self.fftshift(np.real(self.ifft(self.potential_FFT)))
gfunc = Automaton.field_func[(self.world.params.get('gn') or self.gn) - 1]
self.field = gfunc(self.potential, self.world.params['m'], self.world.params['s'])
dt = 1 / self.world.params['T']
		if self.is_multi_step and self.field_old is not None:  # ndarray truth-testing would raise ValueError
D = 1/2 * (3 * self.field - self.field_old)
self.field_old = self.field.copy()
else:
D = self.field
if not self.is_soft_clip:
A_new = np.clip(A + dt * D, 0, 1) # A_new = A + dt * np.clip(D, -A/dt, (1-A)/dt)
else:
A_new = Automaton.soft_clip(A + dt * D, 0, 1, 1/dt) # A_new = A + dt * Automaton.soft_clip(D, -A/dt, (1-A)/dt, 1)
self.change = (A_new - A) / dt
self.world.cells = A_new
self.gen += 1
self.time = round(self.time + dt, ROUND)
if self.is_gpu:
self.gpu_thr.synchronize()
def calc_kernel(self):
I, J = np.meshgrid(np.arange(SIZEX), np.arange(SIZEY))
self.X = (I - MIDX) / self.world.params['R']
self.Y = (J - MIDY) / self.world.params['R']
self.D = np.sqrt(self.X**2 + self.Y**2)
self.kernel = self.kernel_shell(self.D)
self.kernel_sum = np.sum(self.kernel)
kernel_norm = self.kernel / self.kernel_sum
self.kernel_FFT = self.fft(kernel_norm)
self.kernel_updated = False
def reset(self):
self.gen = 0
self.time = 0
self.field_old = None
class Analyzer:
STAT_NAMES = {'p_m':'Param m', 'p_s':'Param s', 'n':'Gen (#)', 't':'Time (s)',
'm':'Mass (mg)', 'g':'Growth (mg/s)', 'I':'Moment of inertia',
'd':'Mass-growth distance (mm)', 's':'Speed (mm/s)', 'w':'Angular speed (deg/s)', 'm_a':'Mass asymmetry (mg)'}
# 'a':'Semi-major axis (mm)', 'b':'Semi-minor axis (mm)', 'e':'Eccentricity', 'c':'Compactness', 'w_th':'Shape angular speed (deg/s)'}
STAT_HEADERS = ['p_m', 'p_s', 'n', 't', 'm', 'g', 'I', 'd', 's', 'w', 'm_a']
# , 'a', 'b', 'e', 'c', 'w_th']
SEGMENT_LEN = 200
def __init__(self, automaton):
self.automaton = automaton
self.world = self.automaton.world
# self.aaa = self.world.cells
self.reset()
def reset(self):
self.is_empty = False
self.is_full = False
self.mass = 0
self.growth = 0
self.inertia = 0
self.m_last_center = None
self.m_center = None
self.g_center = None
self.mg_dist = 0
self.m_shift = 0
self.m_last_angle = None
self.m_angle = 0
self.m_rotate = 0
self.mass_asym = 0
# self.shape_major_axis = 0
# self.shape_minor_axis = 0
# self.shape_eccentricity = 0
# self.shape_compactness = 0
# self.shape_last_angle = None
# self.shape_angle = 0
# self.shape_rotate = 0
self.series = []
self.last_shift_idx = np.zeros(2)
self.total_shift_idx = np.zeros(2)
self.is_clip_segment = True
def calc_stat(self):
R, T = [self.world.params[k] for k in ('R', 'T')]
A = self.world.cells
G = np.maximum(self.automaton.field, 0)
h, w = A.shape
X, Y = self.automaton.X, self.automaton.Y
		m00 = self.mass = np.sum(A)
import os, sys
import glob
import shutil
import argparse
import time
import wave
import numpy as np
class Input():
def __init__(self, filename, n_input):
_, ext = os.path.splitext(filename)
if ext == '.wav':
if os.path.isfile(filename):
self.input = _InputMultiWav(filename, n_input)
else:
self.input = _InputMonoWav(filename, n_input)
elif ext == '.npy':
if os.path.isfile(filename):
self.input = _InputMultiNpy(filename, n_input)
else:
self.input = _InputMonoNpy(filename, n_input)
else:
message = '{} file is not supported.'.format(ext)
print(message)
sys.exit()
self.nframes = self.input.nframes
return
def close(self):
self.input.close()
return
def readframes(self, n):
return self.input.readframes(n)
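# Hypothetical usage sketch (file name and channel count are illustrative):
#
#   src = Input('capture.wav', n_input=8)
#   block = src.readframes(1024)   # float array of shape (8, <=1024), values in [-1, 1)
#   src.close()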
class _InputMultiWav():
def __init__(self, filename, n_input):
self.wr = wave.open(filename, 'r')
params = self.wr.getparams()
self.nchannels = params[0]
self.ws = params[1]
self.fs = params[2]
self.nframes = params[3]
if self.nchannels != n_input:
print('number of input channels does not match.')
            print('%s contains %d channel(s), but %d were expected.'
                  % (filename, self.nchannels, n_input))
sys.exit()
self.pointer = 0
return
def close(self):
self.wr.close()
return
def readframes(self, n):
s = self.pointer
e = s + n
if e > self.nframes:
e = self.nframes
N = e - s
frames = self.wr.readframes(N)
if self.ws == 3:
d = np.zeros((N * self.nchannels, 4), dtype=np.uint8)
d[:, 1:] = np.frombuffer(frames, dtype=np.uint8).reshape(-1, 3)
data = d.view(np.int32)[:, 0] / 2147483648
elif self.ws == 2:
data = np.frombuffer(frames, dtype=np.int16) / 32768
elif self.ws == 4:
data = np.frombuffer(frames, dtype=np.int32) / 2147483648
data = data.reshape((self.nchannels, -1), order='F')
self.pointer += e - s
return data
class _InputMultiNpy():
def __init__(self, filename, n_input):
        self.wr = np.load(filename, 'r')
__author__ = '<NAME>'
__email__ = '<EMAIL>'
from scipy import stats
from sklearn.cross_decomposition import PLSRegression
import numpy as np
from sklearn.metrics import mean_squared_error
import pandas as pd
import sys
import pdb
from sklearn.decomposition import PCA
from .Window import Window
from .util import utility_module as utility
from .util.pls_nipals import vipp
class DionesusWindow(Window):
"""
A window that runs Dionesus as the network inference algorithm. The PLSR function is from sci-kit learn for
implementation consistency between window types
For more information about Dionesus see:
Ciaccio, <NAME>., et al. "The DIONESUS algorithm provides scalable and accurate reconstruction of dynamic
phosphoproteomic networks to reveal new drug targets." Integrative Biology (2015).
"""
def __init__(self, dataframe, window_info, roller_data, td_window, explanatory_dict, response_dict):
super(DionesusWindow, self).__init__(dataframe, window_info, roller_data, td_window, explanatory_dict,
response_dict)
self.num_pcs = None
self.beta_coefficients = None
self.vip = None
self.cv_table = None
self.bootstrap_matrix = None
self.freq_matrix = None
self.edge_stability_auc = None
def make_edge_table(self, calc_mse=False):
"""
:return:
Called by:
Swing.rank_edges()
"""
# Build indexing method for all possible edges. Length = number of parents * number of children
parent_index = range(self.beta_coefficients.shape[1])
child_index = range(self.beta_coefficients.shape[0])
a, b = np.meshgrid(parent_index, child_index)
# Flatten arrays to be used in link list creation
df = pd.DataFrame()
df['Parent'] = self.beta_coefficients.columns.values[a.flatten()]
df['Child'] = self.beta_coefficients.index.values[b.flatten()]
df['Importance'] = self.vip.values.flatten()
df['Beta'] = self.beta_coefficients.values.flatten()
df['P_window'] = self.explanatory_window[a.flatten()]
# Calculate the window of the child node, which is equivalent to the current window index
child_values = np.array([self.nth_window] * self.beta_coefficients.shape[0])
df['C_window'] = child_values[b.flatten()]
if self.permutation_p_values is not None:
df["p_value"] = self.permutation_p_values.flatten()
# Remove any self edges
df = df[~((df['Parent'] == df['Child']) & (df['P_window'] == df['C_window']))]
if calc_mse:
df['MSE_diff'] = self.edge_mse_diff.flatten()
return df
def sort_edges(self, method="importance"):
if self.results_table is None:
raise ValueError("The edge table must be created before getting edges")
if method == "p_value":
self.results_table.sort(columns=['p_value', 'importance'], ascending=[True, False], inplace=True)
elif method == "importance":
self.results_table.sort(columns=['importance', 'p_value'], ascending=[False, True], inplace=True)
return self.results_table['regulator-target'].values
def generate_results_table(self):
# generate edges for initial model
initial_edges = self.create_linked_list(self.beta_coefficients, 'B')
# permutation edges
permutation_mean_edges = self.create_linked_list(self.permutation_means, 'p-means')
permutation_sd_edges = self.create_linked_list(self.permutation_sd, 'p-sd')
stability_edges = self.create_linked_list(self.edge_stability_auc, 'stability')
aggregated_edges = initial_edges.merge(permutation_mean_edges, on='regulator-target').merge(
permutation_sd_edges, on='regulator-target').merge(stability_edges, on='regulator-target')
# sorry, it is a little messy to do the p-value calculations for permutation tests here...
# valid_indices = aggregated_edges['p-sd'] != 0
# valid_indices = aggregated_edges['B'] != 0
valid_window = aggregated_edges
initial_B = valid_window['B']
sd = valid_window['p-sd']
mean = valid_window['p-means']
valid_window['final-z-scores-perm'] = (initial_B - mean) / sd
valid_window['cdf-perm'] = (-1 * abs(valid_window['final-z-scores-perm'])).apply(stats.norm.cdf)
        # calculate two-tailed p-value
valid_window['p-value-perm'] = (2 * valid_window['cdf-perm'])
self.results_table = valid_window
return (self.results_table)
def rank_results(self, rank_by, ascending=False):
rank_column_name = rank_by + "-rank"
# rank edges with an actual beta value first until further notice ##
valid_indices = self.results_table['B'] != 0
valid_window = self.results_table[valid_indices]
valid_window[rank_column_name] = valid_window[rank_by].rank(method="dense", ascending=ascending)
edge_n = len(valid_window.index)
invalid_indices = self.results_table['B'] == 0
invalid_window = self.results_table[invalid_indices]
invalid_window[rank_column_name] = invalid_window[rank_by].rank(method="dense", ascending=ascending)
invalid_window[rank_column_name] += edge_n
self.results_table = valid_window.append(invalid_window)
        self.results_table = self.results_table.sort_values(by=rank_column_name, axis=0)
return (self.results_table)
def run_permutation_test(self, n_permutations=1000, crag=False):
# initialize permutation results array
self.permutation_means = np.empty((self.n_genes, self.n_genes))
self.permutation_sd = np.empty((self.n_genes, self.n_genes))
zeros = np.zeros(self.beta_coefficients.shape)
# initialize running calculation
result = {'n': zeros.copy(), 'mean': zeros.copy(), 'ss': zeros.copy()}
# inner loop: permute the window N number of times
for nth_perm in range(0, n_permutations):
# if (nth_perm % 200 == 0):
# print 'Perm Run: ' +str(nth_perm)
# permute data
permuted_data = self.permute_data(self.explanatory_data)
# fit the data and get coefficients
result_tuple = self.get_coeffs(x_data=permuted_data)
permuted_coeffs = result_tuple[0]
permuted_vip = result_tuple[1]
dummy_list = [permuted_coeffs]
result = self.update_variance_2D(result, dummy_list)
self.permutation_means = result['mean'].copy()
self.permutation_sd = np.sqrt(result['variance'].copy())
self.permutation_p_values = self.calc_p_value()
def calc_p_value(self, value=None, mean=None, sd=None):
if value is None:
value = self.beta_coefficients.copy()
if mean is None:
mean = self.permutation_means.copy()
if sd is None:
sd = self.permutation_sd.copy()
z_scores = (value - mean) / sd
cdf = stats.norm.cdf((-1 * abs(z_scores)))
p_values = 2 * cdf
return p_values
def initialize_params(self):
"""
Optimize the number of PCs to use.
:return:
"""
        # compute the Q2 score for PC = 1..5 and pick the number of PCs that
        # best trades off Q2 against model complexity, via the elbow rule
        # (point of maximum curvature / largest second derivative)
temp = self.remove_stationary_ts
self.remove_stationary_ts = False
result_tuple = self.get_coeffs(crag=False, calc_mse=False)
self.remove_stationary_ts = temp
mse_diff = result_tuple[2]
model_list = result_tuple[3]
model_inputs = result_tuple[4]
explained_variances = None
size_test = []
for response, explanatory, index in model_inputs:
size_test.append(explanatory.shape)
        min_dim = min(s[1] for s in size_test)
for response, explanatory, index in model_inputs:
pca = PCA()
pca.fit(explanatory)
if explained_variances is None:
explained_variances = pca.explained_variance_ratio_
else:
try:
explained_variances = np.vstack((explained_variances, pca.explained_variance_ratio_))
except ValueError:
try:
explained_variances = np.vstack((explained_variances[:,:min_dim], pca.explained_variance_ratio_[:min_dim]))
except IndexError:
truncated_index = min_dim
explained_variances = np.vstack((explained_variances[:truncated_index], pca.explained_variance_ratio_[:truncated_index]))
explained_variances_mean = np.mean(explained_variances, axis = 0)
test_pcs = [x for x in range(1, len(explained_variances_mean)+1)]
elbow_x, elbow_y = utility.elbow_criteria(test_pcs, explained_variances_mean)
self.num_pcs = elbow_x
def fit_window(self, pcs=3, crag=False, calc_mse=False):
"""
Set the attributes of the window using expected pipeline procedure and calculate beta values
:return:
"""
if self.num_pcs is not None:
pcs = self.num_pcs
result_tuple = self.get_coeffs(pcs, crag = crag, calc_mse = calc_mse)
self.beta_coefficients = result_tuple[0]
self.vip = result_tuple[1]
self.edge_mse_diff = result_tuple[2]
self.model_list = result_tuple[3]
def _fitstack_coeffs(self, n_pcs, coeff_matrix, vip_matrix, model_list, x_matrix, target_y, col_index, crag=False):
"""
:param n_pcs:
:param coeff_matrix:
:param vip_matrix:
:param model_list:
:param x_matrix:
:param target_y:
:param col_index:
:param crag:
:return:
"""
        pls = PLSRegression(n_components=n_pcs, scale=False)
# Fit the model
pls.fit(x_matrix, target_y)
model_params = {'col_index': col_index,
'response': target_y,
'predictor': x_matrix,
'model': pls}
model_list.append(model_params)
# artificially add a 0 to where the col_index is to prevent self-edges
coeffs = pls.coef_
coeffs = np.reshape(coeffs, (len(coeffs),))
vips = vipp(x_matrix, target_y, pls.x_scores_, pls.x_weights_)
vips = np.reshape(vips, (len(vips),))
if coeff_matrix.shape[1] - len(coeffs) == 1:
coeffs = np.insert(coeffs, col_index, 0)
vips = np.insert(vips, col_index, 0)
            coeff_matrix = np.vstack((coeff_matrix, coeffs))
def selection_10():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(0.0,2000.0,81,endpoint=True)
# Creating data sequence: middle of each bin
    xData = 0.5 * (xBinning[:-1] + xBinning[1:])
    # Creating weights for histo: y11_PT_0
    y11_PT_0_weights = numpy.zeros(xData.size)
    # Creating weights for histo: y11_PT_1
    y11_PT_1_weights = numpy.zeros(xData.size)
# Creating a new Canvas
    fig = plt.figure(figsize=(12,6),dpi=80)
'''
The manager can solve the work plane sketching,
1. Users can create work planes with input sketch
2. When users draw on a selected work plane, the manager
creates a thin structure according to input sketch.
'''
import numpy as np
import trimesh
import math
from pyntcloud.utils.array import PCA
from configs.config_ui import cfg
import sketch_3d_ui.geometry.geometry_utils as geometry_utils
from sketch_3d_ui.geometry.bill_board import BillBoard
from sketch_3d_ui.geometry.work_plane import WorkPlane
from sketch_3d_ui.manager.geometry_manager import GeometryManager as GM
from sketch_3d_ui.counter import COUNTER
from utils.point_cloud_utils import output_point_cloud_ply
from PyQt5.QtCore import QSize, Qt, QRect, QPoint
from PyQt5.QtGui import QColor, QIcon, QPixmap, QScreen, QPainter, QPen, QImage
from PyQt5.QtWidgets import QOpenGLWidget
class WorkPlaneSketchManager(GM):
def __init__(self):
super(WorkPlaneSketchManager, self).__init__()
self.line_mode = 'free'
self.__candidate_work_plane = None
# bill board
self.confirm_board = BillBoard()
self.candidate_board = BillBoard()
self.canvas = QImage(896, 896, QImage.Format_ARGB32)
self.canvas.fill(Qt.transparent)
self.tmp_canvas = QImage(896, 896, QImage.Format_ARGB32)
self.tmp_canvas.fill(Qt.transparent)
self.sketching = False
self.current_sketch_width = 5
self.current_2d_line = []
def get_candidate_work_plane(self):
return self.__candidate_work_plane
def init_manager(self):
self.init_state()
def init_state(self):
if GM.current_id == None:
self.state = 'UN_SELECTED'
else:
self.state = 'SELECTED'
def update_state(self):
if self.state == 'UN_SELECTED':
if self.__candidate_work_plane:
self.state = 'CONFIRM'
else:
self.state = 'SELECTED'
elif self.state == 'CONFIRM':
self.state = 'SELECTED'
elif self.state == 'SELECTED':
self.state = 'UN_SELECTED'
else:
pass
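    # Summary of the state machine driven by update_state():
    #   UN_SELECTED -> CONFIRM   (a candidate work plane was suggested)
    #   UN_SELECTED -> SELECTED  (no candidate; the drawn plane is selected)
    #   CONFIRM     -> SELECTED  (billboard clicked)
    #   SELECTED    -> UN_SELECTED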
def solve_mouse_event(self, event):
if self.state == 'UN_SELECTED':
self.solve_unselected(event)
elif self.state == 'CONFIRM':
self.solve_confirm(event)
elif self.state == 'SELECTED':
self.solve_selected(event)
def solve_unselected(self, event):
if event == 'press':
self.create_work_plane_by_near_point_cloud_to_camera()
self.start_pos = QPoint(self.mouse_x, self.mouse_y)
self.last_pos = QPoint(self.mouse_x, self.mouse_y)
self.draw_on_canvas()
elif event == 'move':
self.draw_on_canvas()
elif event == 'release':
if len(self.current_2d_line) > 1 and GM.current_id != None:
self.add_current_3d_line(boundary=False)
self.create_work_plane()
self.finish_sketch_create_plane()
self.update_state()
else:
                # if the mouse was only clicked once (no drag), remove the created work plane
GM.work_planes = GM.work_planes[:-1]
GM.current_id = None
self.clear_canvas()
self.current_2d_line = []
else:
pass
def solve_confirm(self, event):
if event == 'press':
pass
elif event == 'move':
pass
elif event == 'release':
hit = self.check_click_bill_board()
if hit:
self.update_state()
else:
pass
def solve_selected(self, event):
if event == 'press':
# on canvas, for visualize
self.start_pos = QPoint(self.mouse_x, self.mouse_y)
self.last_pos = QPoint(self.mouse_x, self.mouse_y)
self.draw_on_canvas()
elif event == 'move':
# on canvas, for visualize
self.draw_on_canvas()
elif event == 'release':
if len(self.current_2d_line) > 1 and GM.current_id != None:
self.add_current_3d_line(boundary=True)
self.finish_sketch_current_plane()
self.clear_canvas()
self.current_2d_line = []
else:
pass
def create_work_plane_by_near_point_cloud_to_camera(self):
# find the 100 nearest point from the camera and fit a work_plane
near_point_cloud = GM.base_point_cloud.get_near_points_from_point_cloud(self.current_view_port.camera_pos)
work_plane = WorkPlane()
work_plane.init_from_point_cloud(near_point_cloud)
# set the current work work plane to be the newest work_plane
GM.work_planes.append(work_plane)
GM.current_id = len(GM.work_planes) - 1
def draw_on_canvas(self):
current_pos = QPoint(self.mouse_x, self.mouse_y)
if self.line_mode == 'straight':
self.canvas = self.tmp_canvas.copy()
painter = QPainter(self.canvas)
painter.setPen(QPen(QColor(Qt.green),
self.current_sketch_width*2,
Qt.SolidLine,
Qt.RoundCap,
Qt.RoundJoin))
        # avoid duplicate input points, which can occur with iPad and Apple Pencil input
if self.line_mode == 'free' and current_pos != self.last_pos:
painter.drawLine(self.last_pos, current_pos)
self.current_2d_line.append([self.mouse_x, self.mouse_y])
elif self.line_mode == 'straight' and current_pos != self.start_pos:
painter.drawLine(self.start_pos, current_pos)
self.current_2d_line = [[self.start_pos.x(), self.start_pos.y()],
[current_pos.x(), current_pos.y()]]
else:
pass
painter.end()
self.last_pos = QPoint(self.mouse_x, self.mouse_y)
def clear_canvas(self):
self.canvas.fill(Qt.transparent)
def check_click_bill_board(self):
hit, hit_point, hit_id = self.mouse_ray_and_planes_hit_detection(mouse_x=self.mouse_x, mouse_y=self.mouse_y,
planes=[self.confirm_board,
self.candidate_board])
if hit:
# delete candidate when click confirm
if hit_id == 0:
self.__candidate_work_plane = None
# add candidate when click candidate
elif hit_id == 1:
GM.work_planes.append(self.__candidate_work_plane)
self.__candidate_work_plane = None
return hit
def add_current_3d_line(self, boundary=False):
self.sketching = False
line_3d = []
for point_2d in self.current_2d_line:
hit, hit_point, _ = \
self.mouse_ray_and_planes_hit_detection(mouse_x=point_2d[0], mouse_y=point_2d[1],
planes=[GM.work_planes[GM.current_id]],
boundary=boundary)
if hit:
self.sketching = True
line_3d.append(hit_point)
GM.work_planes[GM.current_id].lines_3d.append(line_3d)
def create_work_plane(self):
COUNTER.count_plane_creation += 1
self.create_current_plane()
self.create_candidate_plane()
# For rule 1: near plane
# adjust the work_plane size according to the sketch
def create_current_plane(self):
GM.work_planes[GM.current_id].update_bounding_rec_2d_from_lines(self.current_2d_line)
bounding_rec_3d = []
for point in GM.work_planes[GM.current_id].bounding_rec_2d:
hit, hit_point, _ = self.mouse_ray_and_planes_hit_detection(mouse_x=point[0], mouse_y=point[1],
planes=[GM.work_planes[GM.current_id]], boundary=False)
if hit:
bounding_rec_3d.append(hit_point)
GM.work_planes[GM.current_id].bounding_rec_3d = bounding_rec_3d
    # For rule 2: candidate plane.
    # If both endpoints of the sketch are attached to the point cloud,
    # suggest an additional candidate work plane spanning them.
def create_candidate_plane(self):
attach, attach_start_point, attach_end_point = self.check_end_points_attach()
if attach:
candidate_center = (attach_start_point + attach_end_point) / 2
candidate_vector = attach_end_point - attach_start_point
near_point_cloud = self.base_point_cloud.get_near_points_from_point_cloud(candidate_center)
self.create_candidate_work_plane(candidate_center, near_point_cloud)
self.update_candidate_plane(candidate_center, candidate_vector)
self.init_bill_board_list(left_top=GM.work_planes[GM.current_id].bounding_rec_3d[3],
bill_boards=['confirm_board', 'candidate_board'])
def check_end_points_attach(self):
attach = False
attach_start_point = None
attach_end_point = None
start_point_2d = self.current_2d_line[0]
end_point_2d = self.current_2d_line[len(self.current_2d_line) - 1]
hit_start, hit_start_id = self.mouse_ray_and_point_cloud_hit_detection(mouse_x=start_point_2d[0],
mouse_y=start_point_2d[1],
point_cloud=self.base_point_cloud.positions)
hit_end, hit_end_id = self.mouse_ray_and_point_cloud_hit_detection(mouse_x=end_point_2d[0],
mouse_y=end_point_2d[1],
point_cloud=self.base_point_cloud.positions)
if hit_start and hit_end:
attach = True
attach_start_point = self.base_point_cloud.positions[hit_start_id[0]]
attach_end_point = self.base_point_cloud.positions[hit_end_id[0]]
return attach, attach_start_point, attach_end_point
def create_candidate_work_plane(self, candidate_center, near_point_cloud):
w, v = PCA(near_point_cloud)
# if not span a flat plane,
max_id = 0
if not (w[0] >= w[2]*2 and w[1] >= w[2]*2 and w[0] <= w[1]*2):
view_vector = (self.current_view_port.camera_pos - candidate_center)
unit_view_vector = view_vector / np.linalg.norm(view_vector)
angles = []
for i in range(3):
angle = geometry_utils.compute_angle(unit_view_vector, v[:, i])
angles.append(angle)
max_id = np.argmax(angles)
candidate_plane_normal = v[:, max_id]
self.__candidate_work_plane = WorkPlane()
self.__candidate_work_plane.init_from_point_normal(point=candidate_center,
normal=candidate_plane_normal)
else:
self.__candidate_work_plane = WorkPlane()
self.__candidate_work_plane.init_from_point_cloud(near_point_cloud)
def update_candidate_plane(self, candidate_center, candidate_vector):
        # the user-drawn line that spans the plane
line = GM.work_planes[GM.current_id].lines_3d[0]
# check if current normal is face to the camera
current_normal = GM.work_planes[GM.current_id].normal
        view_vector = self.current_view_port.camera_pos - GM.work_planes[GM.current_id].get_center()
        if np.dot(current_normal, view_vector) < 0:
current_normal = -current_normal
# align bounding rectangle to x,y plane
z_vector = np.array([0., 0., 1.])
x_vector = np.array([1., 0., 0.])
origin = np.array([0., 0., 0.])
line, rec, r_mat_to_plane, r_mat_to_vector = geometry_utils.align_points_to_plane(line=GM.work_planes[GM.current_id].lines_3d[0],
rec=GM.work_planes[GM.current_id].bounding_rec_3d,
ori_normal=current_normal,
des_normal=z_vector,
align_end_points_vector=x_vector,
align_end_points_center=origin)
# scale points
candidate_vector_length = np.sqrt(np.sum(candidate_vector**2))
line_length = np.sqrt(np.sum((line[len(line) - 1] - line[0])**2))
x_factor = candidate_vector_length/line_length
line = geometry_utils.scale_points(line, x_factor=x_factor, y_factor=1., z_factor=1.)
rec = geometry_utils.scale_points(rec, x_factor=x_factor, y_factor=1., z_factor=1.)
line = np.dot(line, np.transpose(np.linalg.inv(r_mat_to_vector)))
line = np.dot(line, np.transpose(np.linalg.inv(r_mat_to_plane)))
rec = np.dot(rec, np.transpose(np.linalg.inv(r_mat_to_vector)))
rec = np.dot(rec, np.transpose(np.linalg.inv(r_mat_to_plane)))
candidate_normal = self.__candidate_work_plane.normal
        view_vector = self.current_view_port.camera_pos - self.__candidate_work_plane.point
        if np.dot(candidate_normal, view_vector) < 0:
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Training AdaGAN on various datasets.
Refer to the arXiv paper 'AdaGAN: Boosting Generative Models'
Coded by <NAME>, <NAME>
"""
import os
import argparse
import logging
import tensorflow as tf
import numpy as np
from datahandler import DataHandler
from adagan import AdaGan
from metrics import Metrics
import utils
flags = tf.app.flags
flags.DEFINE_float("g_learning_rate", 0.0002,
"Learning rate for Generator optimizers [16e-4]")
flags.DEFINE_float("d_learning_rate", 0.0001,
"Learning rate for Discriminator optimizers [4e-4]")
flags.DEFINE_float("learning_rate", 0.003,
"Learning rate for other optimizers [8e-4]")
flags.DEFINE_float("adam_beta1", 0.5, "Beta1 parameter for Adam optimizer [0.5]")
flags.DEFINE_integer("zdim", 50, "Dimensionality of the latent space [100]")
flags.DEFINE_float("init_std", 0.01, "Initial variance for weights [0.02]")
flags.DEFINE_string("workdir", 'results_cifar10_pot_conv', "Working directory ['results']")
flags.DEFINE_bool("unrolled", False, "Use unrolled GAN training [True]")
flags.DEFINE_bool("vae", False, "Use VAE instead of GAN")
flags.DEFINE_bool("pot", True, "Use POT instead of GAN")
flags.DEFINE_float("pot_lambda", 1., "POT regularization")
flags.DEFINE_bool("is_bagging", False, "Do we want to use bagging instead of adagan? [False]")
FLAGS = flags.FLAGS
def main():
opts = {}
# Utility
opts['random_seed'] = 66
opts['dataset'] = 'cifar10' # gmm, circle_gmm, mnist, mnist3 ...
opts['data_dir'] = 'cifar10'
opts['trained_model_path'] = None #'models'
opts['mnist_trained_model_file'] = None #'mnist_trainSteps_19999_yhat' # 'mnist_trainSteps_20000'
opts['work_dir'] = FLAGS.workdir
opts['ckpt_dir'] = 'checkpoints'
opts["verbose"] = 1
opts['tf_run_batch_size'] = 128
opts["early_stop"] = -1 # set -1 to run normally
opts["plot_every"] = 150
opts["save_every_epoch"] = 10
opts['gmm_max_val'] = 15.
# Datasets
opts['toy_dataset_size'] = 10000
opts['toy_dataset_dim'] = 2
opts['mnist3_dataset_size'] = 2 * 64 # 64 * 2500
opts['mnist3_to_channels'] = False # Hide 3 digits of MNIST to channels
opts['input_normalize_sym'] = False # Normalize data to [-1, 1]
opts['gmm_modes_num'] = 5
# AdaGAN parameters
opts['adagan_steps_total'] = 1
opts['samples_per_component'] = 1000
opts['is_bagging'] = FLAGS.is_bagging
opts['beta_heur'] = 'uniform' # uniform, constant
opts['weights_heur'] = 'theory_star' # theory_star, theory_dagger, topk
opts['beta_constant'] = 0.5
opts['topk_constant'] = 0.5
opts["mixture_c_epoch_num"] = 5
opts["eval_points_num"] = 25600
opts['digit_classification_threshold'] = 0.999
opts['inverse_metric'] = False # Use metric from the Unrolled GAN paper?
opts['inverse_num'] = 100 # Number of real points to inverse.
opts['objective'] = None
# Generative model parameters
opts["init_std"] = FLAGS.init_std
opts["init_bias"] = 0.0
opts['latent_space_distr'] = 'normal' # uniform, normal
opts['latent_space_dim'] = FLAGS.zdim
opts["gan_epoch_num"] = 200
opts['convolutions'] = True
opts['d_num_filters'] = 512
opts['d_num_layers'] = 4
opts['g_num_filters'] = 1024
opts['g_num_layers'] = 3
opts['e_is_random'] = False
opts['e_num_filters'] = 1024
opts['e_num_layers'] = 3
opts['g_arch'] = 'dcgan_mod'
opts['g_stride1_deconv'] = False
opts['g_3x3_conv'] = 0
opts['e_arch'] = 'dcgan'
opts['e_3x3_conv'] = 0
opts['conv_filters_dim'] = 5
# --GAN specific:
opts['conditional'] = False
opts['unrolled'] = FLAGS.unrolled # Use Unrolled GAN? (only for images)
opts['unrolling_steps'] = 5 # Used only if unrolled = True
# --VAE specific
opts['vae'] = FLAGS.vae
opts['vae_sigma'] = 0.01
# --POT specific
opts['pot'] = FLAGS.pot
opts['pot_pz_std'] = 2.
opts['pot_lambda'] = FLAGS.pot_lambda
opts['adv_c_loss'] = 'none'
opts['vgg_layer'] = 'pool2'
opts['adv_c_patches_size'] = 5
opts['adv_c_num_units'] = 32
opts['adv_c_loss_w'] = 0.0
opts['cross_p_w'] = 0.0
opts['diag_p_w'] = 0.0
opts['emb_c_loss_w'] = 0.0
opts['reconstr_w'] = 1.0
opts['z_test'] = 'gan'
opts['z_test_corr_w'] = 0.1
opts['z_test_proj_dim'] = 50
# Optimizer parameters
opts['optimizer'] = 'adam' # sgd, adam
opts["batch_size"] = 100
opts["d_steps"] = 1
opts['d_new_minibatch'] = False
opts["g_steps"] = 2
opts['batch_norm'] = True
opts['dropout'] = True
opts['dropout_keep_prob'] = 0.5
opts['recon_loss'] = 'l2'
# "manual" or number (float or int) giving the number of epochs to divide
# the learning rate by 10 (converted into an exp decay per epoch).
opts['decay_schedule'] = 100
opts['opt_learning_rate'] = FLAGS.learning_rate
opts['opt_d_learning_rate'] = FLAGS.d_learning_rate
opts['opt_g_learning_rate'] = FLAGS.g_learning_rate
opts["opt_beta1"] = FLAGS.adam_beta1
opts['batch_norm_eps'] = 1e-05
opts['batch_norm_decay'] = 0.9
if opts['e_is_random']:
assert opts['latent_space_distr'] == 'normal',\
'Random encoders currently work only with Gaussian Pz'
# Data augmentation
opts['data_augm'] = False
if opts['verbose']:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
utils.create_dir(opts['work_dir'])
utils.create_dir(os.path.join(opts['work_dir'], opts['ckpt_dir']))
with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
text.write('Parameters:\n')
for key in opts:
text.write('%s : %s\n' % (key, opts[key]))
data = DataHandler(opts)
assert data.num_points >= opts['batch_size'], 'Training set too small'
adagan = AdaGan(opts, data)
metrics = Metrics()
train_size = data.num_points
random_idx = np.random.choice(train_size, 4*320, replace=False)
metrics.make_plots(opts, 0, data.data,
data.data[random_idx], adagan._data_weights, prefix='dataset_')
for step in range(opts["adagan_steps_total"]):
logging.info('Running step {} of AdaGAN'.format(step + 1))
adagan.make_step(opts, data)
num_fake = opts['eval_points_num']
logging.debug('Sampling fake points')
fake_points = adagan.sample_mixture(num_fake)
logging.debug('Sampling more fake points')
more_fake_points = adagan.sample_mixture(500)
logging.debug('Plotting results')
if opts['dataset'] == 'gmm':
metrics.make_plots(opts, step, data.data[:500],
fake_points[0:100], adagan._data_weights[:500])
logging.debug('Evaluating results')
(likelihood, C) = metrics.evaluate(
opts, step, data.data[:500],
fake_points, more_fake_points, prefix='')
else:
metrics.make_plots(opts, step, data.data,
fake_points[:320], adagan._data_weights)
if opts['inverse_metric']:
logging.debug('Evaluating results')
l2 = np.min(adagan._invert_losses[:step + 1], axis=0)
            logging.debug('MSE=%.5f, STD=%.5f' % (np.mean(l2), np.std(l2)))
import numpy as np
from itertools import combinations
import dask.array as dsa
from ..core import (
histogram,
_ensure_correctly_formatted_bins,
_ensure_correctly_formatted_range,
)
from .fixtures import empty_dask_array
import pytest
bins_int = 10
bins_str = "auto"
bins_arr = np.linspace(-4, 4, 10)
range_ = (0, 1)
@pytest.mark.parametrize("density", [False, True])
@pytest.mark.parametrize("block_size", [None, 1, 2])
@pytest.mark.parametrize("axis", [1, None])
@pytest.mark.parametrize("bins", [10, np.linspace(-4, 4, 10), "auto"])
@pytest.mark.parametrize("range_", [None, (-4, 4)])
def test_histogram_results_1d(block_size, density, axis, bins, range_):
nrows, ncols = 5, 20
    # Setting the random seed here prevents np.testing.assert_allclose
    # from failing below. We should investigate this further.
np.random.seed(2)
data = np.random.randn(nrows, ncols)
h, bin_edges = histogram(
data, bins=bins, range=range_, axis=axis, block_size=block_size, density=density
)
expected_shape = (
(nrows, len(bin_edges[0]) - 1) if axis == 1 else (len(bin_edges[0]) - 1,)
)
assert h.shape == expected_shape
# make sure we get the same thing as numpy.histogram
if axis:
bins_np = np.histogram_bin_edges(
data, bins=bins, range=range_
) # Use same bins for all slices below
expected = np.stack(
[
np.histogram(data[i], bins=bins_np, range=range_, density=density)[0]
for i in range(nrows)
]
)
else:
expected = np.histogram(data, bins=bins, range=range_, density=density)[0]
norm = nrows if (density and axis) else 1
np.testing.assert_allclose(h, expected / norm)
if density:
widths = np.diff(bin_edges)
integral = np.sum(h * widths)
np.testing.assert_allclose(integral, 1.0)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_results_1d_weighted(block_size):
nrows, ncols = 5, 20
data = np.random.randn(nrows, ncols)
bins = np.linspace(-4, 4, 10)
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
weights = 2 * np.ones_like(data)
h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)
np.testing.assert_array_equal(2 * h, h_w)
# @pytest.mark.skip(reason="Weight broadcasting on numpy arrays is not yet implemented")
@pytest.mark.parametrize("block_size", [None, 1, 2, "auto"])
def test_histogram_results_1d_weighted_broadcasting(block_size):
nrows, ncols = 5, 20
data = np.random.randn(nrows, ncols)
bins = np.linspace(-4, 4, 10)
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
weights = 2 * np.ones((1, ncols))
h_w, _ = histogram(data, bins=bins, axis=1, weights=weights, block_size=block_size)
np.testing.assert_array_equal(2 * h, h_w)
@pytest.mark.parametrize("block_size", [None, 1, 2])
def test_histogram_right_edge(block_size):
"""Test that last bin is both left- and right-edge inclusive as it
is for numpy.histogram
"""
nrows, ncols = 5, 20
data = np.ones((nrows, ncols))
bins = np.array([0, 0.5, 1]) # All data at rightmost edge
h, _ = histogram(data, bins=bins, axis=1, block_size=block_size)
assert h.shape == (nrows, len(bins) - 1)
# make sure we get the same thing as histogram (all data in the last bin)
hist, _ = np.histogram(data, bins=bins)
np.testing.assert_array_equal(hist, h.sum(axis=0))
# now try with no axis
h_na, _ = histogram(data, bins=bins, block_size=block_size)
np.testing.assert_array_equal(hist, h_na)
def test_histogram_results_2d():
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b])
assert h.shape == (nbins_a, nbins_b)
hist, _, _ = np.histogram2d(data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b])
np.testing.assert_array_equal(hist, h)
def test_histogram_results_2d_density():
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
h, _ = histogram(data_a, data_b, bins=[bins_a, bins_b], density=True)
assert h.shape == (nbins_a, nbins_b)
hist, _, _ = np.histogram2d(
data_a.ravel(), data_b.ravel(), bins=[bins_a, bins_b], density=True
)
np.testing.assert_allclose(hist, h)
# check integral is 1
widths_a = np.diff(bins_a)
widths_b = np.diff(bins_b)
areas = np.outer(widths_a, widths_b)
integral = np.sum(hist * areas)
np.testing.assert_allclose(integral, 1.0)
def test_histogram_results_3d_density():
nrows, ncols = 5, 20
data_a = np.random.randn(nrows, ncols)
data_b = np.random.randn(nrows, ncols)
data_c = np.random.randn(nrows, ncols)
nbins_a = 9
bins_a = np.linspace(-4, 4, nbins_a + 1)
nbins_b = 10
bins_b = np.linspace(-4, 4, nbins_b + 1)
nbins_c = 9
bins_c = np.linspace(-4, 4, nbins_c + 1)
h, _ = histogram(
data_a, data_b, data_c, bins=[bins_a, bins_b, bins_c], density=True
)
assert h.shape == (nbins_a, nbins_b, nbins_c)
hist, _ = np.histogramdd(
(data_a.ravel(), data_b.ravel(), data_c.ravel()),
bins=[bins_a, bins_b, bins_c],
density=True,
)
np.testing.assert_allclose(hist, h)
# check integral is 1
    widths_a = np.diff(bins_a)
"""Abstractions and default converters."""
import abc
import copy
import dataclasses
import enum
import itertools
from typing import Any, Callable, Collection, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
import attr
import numpy as np
from vizier import pyvizier
# The study identifier for cross-study learning must be stored in
# Trial.Metadata and StudyConfig.Metadata with this key.
# TODO: Use metadata namespace instead.
STUDY_ID_FIELD = 'metalearn_study_id'
class NumpyArraySpecType(enum.Enum):
"""Type information for NumpyArraySpec.
CONTINUOUS: Continuous parameter
DISCRETE: Discrete/integer/categorical parameter
ONEHOT_EMBEDDING: One-hot embedding of DISCRETE.
"""
CONTINUOUS = 'CONTINUOUS'
DISCRETE = 'DISCRETE'
ONEHOT_EMBEDDING = 'ONEHOT_EMBEDDING'
@classmethod
def default_factory(cls,
pc: pyvizier.ParameterConfig) -> 'NumpyArraySpecType':
"""SpecType when encoding discretes as integer indices."""
if pc.type == pyvizier.ParameterType.DOUBLE:
return NumpyArraySpecType.CONTINUOUS
elif pc.type in (pyvizier.ParameterType.DISCRETE,
pyvizier.ParameterType.CATEGORICAL,
pyvizier.ParameterType.INTEGER):
return NumpyArraySpecType.DISCRETE
raise ValueError(f'Unknown type {pc.type}')
@classmethod
def embedding_factory(cls,
pc: pyvizier.ParameterConfig) -> 'NumpyArraySpecType':
"""SpecType when encoding discretes as onehot embedding."""
if pc.type == pyvizier.ParameterType.DOUBLE:
return NumpyArraySpecType.CONTINUOUS
elif pc.type in (pyvizier.ParameterType.DISCRETE,
pyvizier.ParameterType.CATEGORICAL,
pyvizier.ParameterType.INTEGER):
return NumpyArraySpecType.ONEHOT_EMBEDDING
raise ValueError(f'Unknown type {pc.type}')
@attr.define(frozen=True, auto_attribs=True)
class NumpyArraySpec:
"""Encodes what an array represents.
This class is similar to `BoundedTensorSpec` in tf agents, except it carries
extra information specific to vizier.
If `type` is `DOUBLE`, then `dtype` is a floating type, and bounds are
floating numbers. num_dimensions is always 1, and num_oovs is zero.
If 'type' is `DISCRETE`, then `dtype` is an integer type, and bounds are
integers. num_dimensions is always 1. Suppose `bounds=(x,y)`. Then integers
x to (y-num_oovs) correspond to valid parameter values. The rest represent
out-of-vocabulary values. For example, an integer parameter in range (1,3)
can be represented by a DISCRETE NumpyArraySpec with bounds=(1,4) and oov=1.
If 'type' is `ONEHOT_EMBEDDING`, then `dtype` is a floating type, and bounds
are floating numbers. Suppose num_dimensions is X.
Attributes:
type: Underlying type of the Vizier parameter corresponding to the array.
dtype: Numpy array's type.
bounds: Always inclusive in both directions.
num_dimensions: Corresponds to shape[-1] of the numpy array. When `type` is
`ONEHOT_EMBEDDING`, the first X dimensions correspond to valid parameter
values. The other dimensions correspond to out-of-vocabulary values.
Otherwise, it is simply 1.
name: Parameter name.
num_oovs: Number of out-of-vocabulary items, for non-continuous type.
scale: Scaling of the values.
"""
type: NumpyArraySpecType
dtype: np.dtype
bounds: Union[Tuple[float, float], Tuple[int, int]]
num_dimensions: int
name: str
num_oovs: int
scale: Optional[pyvizier.ScaleType] = None
def __attrs_post_init__(self):
object.__setattr__(self, 'bounds',
tuple(np.array(self.bounds, dtype=self.dtype)))
@classmethod
def from_parameter_config(
cls,
pc: pyvizier.ParameterConfig,
type_factory: Callable[
[pyvizier.ParameterConfig],
NumpyArraySpecType] = NumpyArraySpecType.default_factory,
floating_dtype: np.dtype = np.float32,
int_dtype: np.dtype = np.int32,
*,
pad_oovs: bool = True) -> 'NumpyArraySpec':
"""Factory function.
Args:
pc:
type_factory: NumpyArraySpecType has `default_factory` and
`embedding_factory`. The difference is in how they handle non-continuous
parameters.
floating_dtype: Dtype of the floating outputs.
int_dtype: Dtype of the integer outputs.
pad_oovs: If True, pad the out-of-vocabulary dimensions to onehot
embedding.
Returns:
NumpyArraySpec.
"""
the_type = type_factory(pc)
if the_type == NumpyArraySpecType.CONTINUOUS:
return NumpyArraySpec(
the_type,
floating_dtype,
bounds=pc.bounds,
num_dimensions=1,
scale=pc.scale_type,
name=pc.name,
num_oovs=0)
elif the_type == NumpyArraySpecType.DISCRETE:
return NumpyArraySpec(
the_type,
int_dtype,
bounds=(0, len(pc.feasible_values)),
num_dimensions=1,
name=pc.name,
num_oovs=1 if pad_oovs else 0)
elif the_type == NumpyArraySpecType.ONEHOT_EMBEDDING:
return NumpyArraySpec(
the_type,
floating_dtype,
bounds=(0., 1.),
num_dimensions=len(pc.feasible_values) + 1,
name=pc.name,
num_oovs=1 if pad_oovs else 0)
    raise ValueError(f'Unknown type {the_type}')
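  # For example (a sketch of the resulting specs, not an exact API call):
  # given a categorical parameter config with three feasible values, the
  # default factory yields a DISCRETE spec with bounds (0, 3) and one
  # out-of-vocabulary index, while the embedding factory yields an
  # ONEHOT_EMBEDDING spec with num_dimensions == 4 (3 values + 1 OOV slot).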
def dict_to_array(array_dict: Dict[str, np.ndarray]) -> np.ndarray:
r"""Converts a dict of (..., D_i) arrays to a (..., \sum_i D_i) array."""
return np.concatenate(list(array_dict.values()), axis=-1)
class DictOf2DArrays(Mapping[str, np.ndarray]):
"""Dictionary of string to 2D arrays.
All arrays share the first dimension, which is at a high level, the number of
objects that this dictionary corresponds to.
Attributes:
size: Array's shape[0].
"""
def __init__(self, d: Dict[str, np.ndarray]):
self._d = d
shape = None
for k, v in self.items():
if shape is None:
shape = v.shape
if len(shape) != 2:
          raise ValueError(f'{k} has shape {v.shape}, which is not 2D. '
                           'DictOf2DArrays only supports 2D numpy arrays.')
if shape[0] != v.shape[0]:
raise ValueError(
f'{k} has shape {v.shape} which is not equal to {shape}.')
self._size = shape[0]
def __getitem__(self, key: str) -> np.ndarray:
return self._d[key]
def __iter__(self) -> Iterator[str]:
return iter(self._d)
def __len__(self) -> int:
return len(self._d)
def __add__(self, other: 'DictOf2DArrays') -> 'DictOf2DArrays':
if not isinstance(other, DictOf2DArrays):
raise ValueError('You can add DictOf2DArrays only.')
if len(self) != len(other):
# We don't check the keys because it's too slow.
raise ValueError('Two arrays have different length!')
return DictOf2DArrays(
{k: np.concatenate([self[k], other[k]], axis=0) for k in self})
@property
def size(self) -> int:
return self._size
def asarray(self) -> np.ndarray:
return dict_to_array(self._d)
def dict_like(self, array: np.ndarray) -> 'DictOf2DArrays':
"""[Experimental] Converts an array into a dict with the same keys as this.
This function acts like an inverse of `asarray()`, i.e. it satisfies
`self.dict_like(self.asarray()) == self`.
Example:
d = DictOf2DArrays({'p1': [[1], [2], [3]], 'p2': [[4], [5], [6]]})
d.dict_like([[1, 2], [3, 4]]) == {'p1': [[1], [3]], 'p2': [[2],[4]]}
Args:
array:
Returns:
DictOf2DArrays with the same shape spec as this.
"""
begin = 0
new_dict = dict()
for k, v in self.items():
end = begin + v.shape[1]
new_dict[k] = array[:, begin:end].astype(v.dtype)
begin = end
return DictOf2DArrays(new_dict)
class TrialToNumpyDict(abc.ABC):
"""Parses a sequence of Trials to a dict keyed by parameter and metric names.
A typical Keras/JAX pipeline consists of:
1. Load data into arrays.
2. Call Model.build() to initialize a model for the loaded data shape.
3. Call Model.fit() to train the model.
4. Call Model.__call__() to predict with the model.
This abstraction allows a shared implementation of steps 1,2 and 3.
"""
@abc.abstractmethod
def to_xy(
self, trials: Sequence[pyvizier.Trial]
) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
"""Returns (x,y) pair that can be used as input for keras.Model.fit()."""
pass
@property
@abc.abstractmethod
def features_shape(self) -> Dict[str, Sequence[Union[int, None]]]:
"""Returned value can be used as `input_shape` for keras.Model.build()."""
pass
@property
@abc.abstractmethod
def output_specs(self) -> Dict[str, NumpyArraySpec]:
"""Same keys as features_shape, with more details."""
pass
@property
@abc.abstractmethod
def labels_shape(self) -> Dict[str, Any]:
pass
@property
@abc.abstractmethod
def metric_information(self) -> Dict[str, pyvizier.MetricInformation]:
pass
class ModelInputConverter(metaclass=abc.ABCMeta):
"""Interface for extracting inputs to the model."""
@abc.abstractmethod
def convert(self, trials: Sequence[pyvizier.Trial]) -> np.ndarray:
"""Returns an array of shape (number of trials, feature dimension).
Args:
trials:
Returns:
Returns an array of shape (number of trials, feature dimension).
Subclasses must use a fixed feature dimension. In particular, it should
be a constant function of the input trials.
"""
pass
@property
@abc.abstractmethod
def output_spec(self) -> NumpyArraySpec:
"""Provides specification of the output from this converter."""
pass
@property
@abc.abstractmethod
def parameter_config(self):
"""Original ParameterConfig that this converter acts on."""
pass
@abc.abstractmethod
def to_parameter_values(
self, array: np.ndarray) -> List[Optional[pyvizier.ParameterValue]]:
"""Convert to parameter values."""
pass
@dataclasses.dataclass
class _ModelInputArrayBijector:
"""Transformations on the numpy arrays generated by ModelInputConverter."""
forward_fn: Callable[[np.ndarray], np.ndarray]
backward_fn: Callable[[np.ndarray], np.ndarray]
output_spec: NumpyArraySpec # Spec after forward_fn is applied.
@classmethod
def identity(cls, spec) -> '_ModelInputArrayBijector':
return cls(lambda x: x, lambda x: x, spec)
@classmethod
def scaler_from_spec(cls, spec: NumpyArraySpec) -> '_ModelInputArrayBijector':
"""For continuous specs, linearize and scale it to (0, 1) range."""
low, high = spec.bounds
if spec.type != NumpyArraySpecType.CONTINUOUS:
return cls.identity(attr.evolve(spec, scale=None))
if low == high:
def backward_fn(y):
return np.where(np.isfinite(y), np.zeros_like(y) + low, y)
return cls(lambda x: np.where(np.isfinite(x), np.zeros_like(x), x),
backward_fn, attr.evolve(spec, bounds=(.0, 1.), scale=None))
if spec.scale == pyvizier.ScaleType.LOG:
low, high = np.log(low), np.log(high)
denom = (high - low) or 1.0
if denom < 1e-6:
logging.warning('Unusually small range detected for %s', spec)
scale_fn = lambda x, high=high, low=low: (np.log(x) - low) / (high - low)
unscale_fn = lambda x, high=high, low=low: np.exp(x * (high - low) + low)
else:
if not (spec.scale == pyvizier.ScaleType.LINEAR or spec.scale is None):
logging.warning('Unknown scale type %s. Applying LINEAR', spec.scale)
denom = (high - low)
if denom < 1e-6:
logging.warning('Unusually small range detected for %s', spec)
if denom == 1.0 and low == 0:
return cls.identity(attr.evolve(spec, scale=None))
scale_fn = lambda x, high=high, low=low: (x - low) / (high - low)
unscale_fn = lambda x, high=high, low=low: x * (high - low) + low
return cls(scale_fn, unscale_fn,
attr.evolve(spec, bounds=(.0, 1.), scale=None))
@classmethod
def onehot_embedder_from_spec(cls,
spec: NumpyArraySpec,
*,
dtype=np.float32,
pad_oovs: bool = True
) -> '_ModelInputArrayBijector':
"""Given a discrete spec, one-hot embeds it."""
if spec.type != NumpyArraySpecType.DISCRETE:
return cls.identity(spec)
num_oovs = 1 if pad_oovs else 0
output_spec = NumpyArraySpec(
NumpyArraySpecType.ONEHOT_EMBEDDING,
dtype,
bounds=(0., 1.),
num_dimensions=int(spec.bounds[1] - spec.bounds[0] + num_oovs),
name=spec.name,
num_oovs=num_oovs,
scale=None)
def embed_fn(x: np.ndarray, output_spec=output_spec):
"""x is integer array of [N, 1]."""
return np.eye(
output_spec.num_dimensions, dtype=output_spec.dtype)[x.flatten()]
def unembed_fn(x: np.ndarray, spec=spec, output_spec=output_spec):
return np.argmax(
x[:, :output_spec.num_dimensions - output_spec.num_oovs],
axis=1).astype(spec.dtype)
return cls(embed_fn, unembed_fn, output_spec)
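# A hedged sketch of the one-hot bijector round trip; `spec` is assumed to
# be a DISCRETE NumpyArraySpec (its integer bounds set the embedding width,
# with one padded out-of-vocabulary dimension appended by default):
def _onehot_bijector_example(spec: NumpyArraySpec):
    bij = _ModelInputArrayBijector.onehot_embedder_from_spec(spec)
    x = np.array([[0], [2], [1]])         # integer indices, shape (3, 1)
    onehot = bij.forward_fn(x)            # shape (3, num_dimensions)
    recovered = bij.backward_fn(onehot)   # argmax over non-OOV dims
    return onehot, recovered              # recovered == [0, 2, 1]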
def _create_default_getter(
pconfig: pyvizier.ParameterConfig) -> Callable[[pyvizier.Trial], Any]:
"""Create a default getter for the given parameter config."""
def getter(trial, pconfig=pconfig):
if pconfig.name not in trial.parameters:
return None
pvalue = trial.parameters[pconfig.name]
if pconfig.type == pyvizier.ParameterType.DOUBLE:
return pvalue.as_float
elif pconfig.type == pyvizier.ParameterType.DISCRETE:
return pvalue.as_float
elif pconfig.type == pyvizier.ParameterType.INTEGER:
return pvalue.as_int
else:
return pvalue.as_str
return getter
class DefaultModelInputConverter(ModelInputConverter):
"""Converts trials into a (None, 1) array corresponding to a parameter.
If the parameter_config is continuous, values obtained from `getter()` are
directly returned as floating numbers. Otherwise, this converter returns
the index of the value obtained from `getter()` within
`parameter_config.feasible_points` as int32.
"""
def __init__(self,
parameter_config: pyvizier.ParameterConfig,
getter: Optional[Callable[[pyvizier.Trial], Any]] = None,
*,
float_dtype: np.dtype = np.float32,
max_discrete_indices: int = 10,
scale: bool = False,
onehot_embed: bool = False,
converts_to_parameter: bool = True,
pad_oovs: bool = True):
"""Init.
Given B trials, convert() always converts to (B, 1) array. The returned
array may contain NaNs.
Args:
parameter_config:
getter: See class pydoc. If the getter is not specified, the default
getter looks up `parameter_config.name` inside `Trial.parameters`.
float_dtype: floating precision to be used.
max_discrete_indices: If the parameter config has more than this many
DISCRETE/INTEGER feasible points, then the parameter config is
continuified first.
scale:
onehot_embed:
converts_to_parameter: If False, this converter does not correspond to an
actual parameter in the Vizier search space, and `to_parameter_value` always
returns None.
pad_oovs: If True, pad the out-of-vocabulary dimensions to onehot
embedding.
"""
self._converts_to_parameter = converts_to_parameter
self._parameter_config = copy.deepcopy(parameter_config)
if parameter_config.type in (
pyvizier.ParameterType.INTEGER, pyvizier.ParameterType.DISCRETE
) and parameter_config.num_feasible_values > max_discrete_indices:
parameter_config = parameter_config.continuify()
# TODO: Make the default getter raise an Error if they encounter an
# out-of-vocabulary value but pad_oovs is False.
self._getter = getter or _create_default_getter(parameter_config)
# Getter spec can only have DISCRETE or CONTINUOUS types.
self._getter_spec = NumpyArraySpec.from_parameter_config(
parameter_config,
NumpyArraySpecType.default_factory,
floating_dtype=float_dtype)
# Optionally scale and onehot embed.
spec = self._getter_spec
self.scaler = (
_ModelInputArrayBijector.scaler_from_spec(spec)
if scale else _ModelInputArrayBijector.identity(spec))
spec = self.scaler.output_spec
self.onehot_encoder = (
_ModelInputArrayBijector.onehot_embedder_from_spec(
spec, dtype=float_dtype, pad_oovs=pad_oovs)
if onehot_embed else _ModelInputArrayBijector.identity(spec))
spec = self.onehot_encoder.output_spec
self._output_spec = spec
def convert(self, trials: Sequence[pyvizier.Trial]) -> np.ndarray:
"""Returns an array of shape [len(trials), output_spec.num_dimensions].
Args:
trials:
Returns:
For each `trial`, if `self.getter(trial)` returns `None`, we _impute_ the
value; otherwise, we _extract_ the value.
If `self.parameter_config.type` is `DOUBLE`, then
* EXTRACT: Directly use the getter's return value as float.
* IMPUTE: Return `nan`.
Otherwise,
* EXTRACT: Returns the integer index of the getter's return value within
feasible values.
* IMPUTE: Returns `len(feasible_values)`.
"""
if not trials:
return np.zeros([0, self.output_spec.num_dimensions],
dtype=self.output_spec.dtype)
value_converter = (
self._convert_index if self._getter_spec.type
== NumpyArraySpecType.DISCRETE else self._convert_continuous)
values = [value_converter(t) for t in trials]
array = np.asarray(values, dtype=self._getter_spec.dtype)
"""
Module for building the structural model
<EMAIL>
v. 2017-12-17
"""
import numpy as np
import numpy.matlib
########################################################################
def masMat(nDof,inpMas,fixDof):
"""
Returns structural lumped mass matrix.
Input:
- nDof: number of active DOFs
- inpMas: list with the lumped masses
- fixDof: list of active DOFs
Output:
- M_m: structural mass matrix
"""
M_m = np.matlib.zeros((nDof,nDof))
for masNod in inpMas:
nod = masNod[0]
dof = np.sum(fixDof[0:nod][:])
j = 1
for act in fixDof[nod][:]:
M_m[dof,dof] = masNod[j]
dof+=act
j+=1
return M_m
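# A hedged usage sketch for masMat (illustrative values only): a lumped
# mass of 10.0 on both active DOFs of node 1 fills the last two diagonal
# entries of the 4x4 mass matrix.
def _masMat_example():
    inpMas = [[1, 10.0, 10.0]]        # [node, mass on DOF 1, mass on DOF 2]
    fixDof = [[1, 1], [1, 1]]         # both DOFs active on nodes 0 and 1
    return masMat(4, inpMas, fixDof)  # diag(0, 0, 10, 10)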
########################################################################
def stiMat(nDof,strEle,mesDat,eltPro,fixDof):
"""
Assembles and returns the structural stiffness matrix.
-------
Inputs:
strEle: for each element [node I, node J, type]
mesDat: for each element [length, cos, sin]
eltPro: for each element type [material properties]
- type 0 (beam): [matE,secA,secIy]
- type 1 (connection): [secMpl]
matE: material elastic modulus
secA: element section area
secIy: section moment of inertia about its centroid axis
secMpl: section plastic moment
-------
Output:
K_m: structural stiffness matrix
"""
# initialize
# Note: real FE programs use sparse storage (zeros are not stored); the
# dense matrix built below is just for illustration
K_m = np.matlib.zeros((nDof,nDof))
# loop over the elements
eNum = 0
for elt in strEle:
nodI = elt[0] # node numbers
nodJ = elt[1]
eltTyp = elt[2] # element type
secTyp = elt[3] # section type
# build element stiffness matrix in local coord. system
if eltTyp == 0: # element of type 0 (beam/column)
eltL = mesDat[eNum][0]
matE = eltPro[secTyp][0]
secA = eltPro[secTyp][1]
secI = eltPro[secTyp][2]
KeLoc_m = beaSti_EB_2D_loc(matE,secA,eltL,secI)
elif eltTyp == 1: # rigid rotational spring
secMpl = eltPro[eltTyp][0]
# d_a1 = np.concatenate(dis_v[I:I+3], dis_v[J:J+3])
# KeLoc_a2 = strMod.rigRotSpr_2D_loc(E,A,Lf,I,My,d_a1) -> To be developed for NL response
KeLoc_m = rigRotSprSti(secMpl)
# transform to global coordinate system
cos = mesDat[eNum][1]
sin = mesDat[eNum][2]
R_m = glo2loc_2D(cos,sin)
KeGlo_m = np.transpose(R_m)*KeLoc_m*R_m  # R^T * Ke * R (local -> global axes)
from __future__ import print_function
import os
import sqlite3
import healpy as hp
import numpy as np
import pickle as pk
from plancklens.helpers import mpi
from plancklens import utils
class rng_db:
""" Class to save and read random number generators states in a sqlite database file.
"""
def __init__(self, fname, idtype="INTEGER"):
if not os.path.exists(fname) and mpi.rank == 0:
con = sqlite3.connect(fname, detect_types=sqlite3.PARSE_DECLTYPES, timeout=3600)
cur = con.cursor()
cur.execute("create table rngdb (id %s PRIMARY KEY, "
"type STRING, pos INTEGER, has_gauss INTEGER,cached_gaussian REAL, keys STRING)" % idtype)
con.commit()
mpi.barrier()
self.con = sqlite3.connect(fname, timeout=3600., detect_types=sqlite3.PARSE_DECLTYPES)
def add(self, idx, state):
try:
assert (self.get(idx) is None)
keys_string = '_'.join(str(s) for s in state[1])
self.con.execute("INSERT INTO rngdb (id, type, pos, has_gauss, cached_gaussian, keys) VALUES (?,?,?,?,?,?)",
(idx, state[0], state[2], state[3], state[4], keys_string))
self.con.commit()
except:
print("rng_db::rngdb add failed!")
def get(self, idx):
cur = self.con.cursor()
cur.execute("SELECT type, pos, has_gauss, cached_gaussian, keys FROM rngdb WHERE id=?", (idx,))
data = cur.fetchone()
cur.close()
if data is None:
return None
else:
assert (len(data) == 5)
typ, pos, has_gauss, cached_gaussian, keys = data
keys = np.array([int(a) for a in keys.split('_')], dtype=np.uint32)
return [typ, keys, pos, has_gauss, cached_gaussian]
def delete(self, idx):
try:
if self.get(idx) is None:
return
self.con.execute("DELETE FROM rngdb WHERE id=?", (idx,))
self.con.commit()
except:
print("rng_db::rngdb delete %s failed!" % idx)
class sim_lib(object):
"""Generic class for simulations where only rng state is stored.
np.random rng states are stored in a sqlite3 database. By default the rng state function is np.random.get_state.
The rng_db class is tuned for this state function; if you use a different one, you may need to adapt it.
"""
def __init__(self, lib_dir, get_state_func=np.random.get_state, nsims_max=None):
if not os.path.exists(lib_dir) and mpi.rank == 0:
os.makedirs(lib_dir)
self.nmax = nsims_max
fn_hash = os.path.join(lib_dir, 'sim_hash.pk')
if mpi.rank == 0 and not os.path.exists(fn_hash):
pk.dump(self.hashdict(), open(fn_hash, 'wb'), protocol=2)
mpi.barrier()
hsh = pk.load(open(fn_hash, 'rb'))
utils.hash_check(hsh, self.hashdict(), ignore=['lib_dir'])
self._rng_db = rng_db(os.path.join(lib_dir, 'rngdb.db'), idtype='INTEGER')
self._get_rng_state = get_state_func
def get_sim(self, idx, **kwargs):
"""Returns sim number idx and caches random number generator state. """
if self.has_nmax(): assert idx < self.nmax
if not self.is_stored(idx):
self._rng_db.add(idx, self._get_rng_state())
return self._build_sim_from_rng(self._rng_db.get(idx), **kwargs)
def has_nmax(self):
return self.nmax is not None
def is_stored(self, idx):
"""Checks whether sim idx is stored or not. Boolean output. """
return self._rng_db.get(idx) is not None
def is_full(self):
"""Checks whether all sims are stored or not. Boolean output. """
if not self.has_nmax(): return False
for idx in range(self.nmax):
if not self.is_stored(idx): return False
return True
def is_empty(self):
"""Checks whether any sims is stored. Boolean output. """
assert self.nmax is not None
for idx in range(self.nmax):
if self.is_stored(idx): return False
return True
def hashdict(self):
    """Override this """
    raise NotImplementedError
def _build_sim_from_rng(self, rng_state):
    """Override this """
    raise NotImplementedError
class _pix_lib_phas(sim_lib):
def __init__(self, lib_dir, shape, **kwargs):
self.shape = shape
super(_pix_lib_phas, self).__init__(lib_dir, **kwargs)
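    def hashdict(self):
        # Hedged completion: sim_lib.__init__ calls self.hashdict() for its
        # consistency check, so a concrete subclass must define it; the
        # array shape is assumed to fully determine this phase library.
        return {'type': '_pix_lib_phas', 'shape': self.shape}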
def _build_sim_from_rng(self, rng_state, **kwargs):
np.random.set_state(rng_state)
return np.random.standard_normal(self.shape)
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.periodic_table import Element
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.colors import Normalize
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.patheffects as pa
import copy as copy
from vaspvis.utils import make_supercell, group_layers
from ase.data.colors import jmol_colors
from sklearn.neighbors import radius_neighbors_graph
import os
import time
class STM:
"""
This class contains all the methods for generating STM images with VASP.
"""
def __init__(
self,
folder,
):
self.folder = folder
self.preloaded_data = os.path.isfile(os.path.join(folder, 'parchg.npy'))
self.poscar = Poscar.from_file(
os.path.join(folder, 'POSCAR'),
check_for_POTCAR=False,
read_velocities=False
)
self.data, self.a_vals, self.b_vals, self.c_vals = self._load_parchg()
[self.bottom_surface,
self.bottom_ind,
self.top_surface,
self.top_ind] = self._get_surface_heights()
self.X = None
self.Y = None
self.Z = None
self.x_shift = None
self.y_shift = None
def _load_parchg(self):
if self.preloaded_data:
with open(os.path.join(self.folder, 'parchg.npy'), 'rb') as p:
data = np.load(p)
else:
parchg = Chgcar.from_file(os.path.join(self.folder, 'PARCHG'))
data = parchg.data['total']
np.save(os.path.join(self.folder, 'parchg.npy'), data)
a_vals = np.linspace(0,1,data.shape[0])
b_vals = np.linspace(0,1,data.shape[1])
c_vals = np.linspace(0,1,data.shape[2])
return data, a_vals, b_vals, c_vals
def _get_surface_heights(self):
bottom_surface = self.poscar.structure.frac_coords[:,-1].min()
top_surface = self.poscar.structure.frac_coords[:,-1].max()
bottom_ind = np.argmin((self.c_vals - bottom_surface)**2)
top_ind = np.argmin((self.c_vals - top_surface)**2)
return bottom_surface, bottom_ind, top_surface, top_ind
def _interp(self, x, x1, x2, y1, y2):
return y1 + (((y2 - y1) / (x2 - x1)) * (x - x1))
def _rotate_structure(self, structure, angle):
copy_structure = copy.copy(structure)
angle = angle * (np.pi / 180)
operation = SymmOp.from_rotation_and_translation(
rotation_matrix=np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0,0,1],
]),
translation_vec=[0,0,0],
)
copy_structure.apply_operation(operation, fractional=False)
return copy_structure
def _get_constant_current_isosurface(self, current, sigma=6, top=True):
slab_middle_ind = int((self.top_ind + self.bottom_ind) / 2)
cell_middle_ind = int(self.data.shape[-1] / 2)
shift = cell_middle_ind - slab_middle_ind
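        # Roll the charge density along c so the slab sits mid-cell; this
        # keeps the vacuum region contiguous when scanning for the
        # constant-current isosurface from above (top=True) or below
        # (top=False).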
init_shape = self.data.shape[:2]
shifted_slab = np.roll(
self.data,
shift,
axis=2,
)
c_vals = self.c_vals
c_vals_extended = np.hstack([c_vals[:-1] - 1, c_vals, c_vals[1:] + 1])
shifted_cvals = np.roll(
c_vals_extended,
shift,
)
shifted_cvals = shifted_cvals[len(c_vals)-1:(2*len(c_vals))-1]
if top:
shifted_slab = shifted_slab[:,:,self.top_ind+shift:]
shifted_cvals = shifted_cvals[self.top_ind+shift:]
else:
shifted_slab = shifted_slab[:,:,:self.bottom_ind+shift]
shifted_cvals = shifted_cvals[:self.bottom_ind+shift]
if top:
heights = np.zeros(shifted_slab.shape[:2])
inds = np.zeros(shifted_slab.shape[:2], dtype=bool)
for i in range(0, shifted_slab.shape[-1]-1)[::-1]:
points = inds < (shifted_slab[:,:,i] > current)
x1 = shifted_slab[points, i]
x2 = shifted_slab[points, i+1]
y1 = shifted_cvals[i]
y2 = shifted_cvals[i+1]
heights[points] = self._interp(
x=current,
x1=x1,
x2=x2,
y1=y1,
y2=y2,
)
inds[points] = True
heights[heights <= self.top_surface] = heights[heights > self.top_surface].min()
else:
heights = np.zeros(shifted_slab.shape[:2])
inds = np.zeros(shifted_slab.shape[:2], dtype=bool)
for i in range(1, shifted_slab.shape[-1]):
points = inds < (shifted_slab[:,:,i] > current)
x1 = shifted_slab[points, i-1]
x2 = shifted_slab[points, i]
y1 = shifted_cvals[i-1]
y2 = shifted_cvals[i]
heights[points] = self._interp(
x=current,
x1=x1,
x2=x2,
y1=y1,
y2=y2,
)
inds[points] = True
heights[heights >= self.top_surface] = heights[heights < self.top_surface].min()
return heights
def _generate_supercell(self, x, y, Z, scaling_matrix=[8,8]):
x = np.concatenate([x + i for i in range(scaling_matrix[0])])
y = np.concatenate([y + i for i in range(scaling_matrix[1])])
Z = np.hstack([
np.vstack([Z for _ in range(scaling_matrix[0])]) for _ in range(scaling_matrix[1])
])
return x, y, Z
def _get_intercept(self, midpoint, vector):
if vector[0] == 0:
intersect = [0, midpoint[1]]
else:
slope = vector[1] / vector[0]
f = ((slope * midpoint[1]) + midpoint[0])/ ((slope**2) + 1)
intersect = [f, slope * f]
return intersect
def _get_ratio(self, a, b):
a_norm = np.linalg.norm(a)
b_norm = np.linalg.norm(b)
ratio_array = np.ones(2)
min_ind = np.argmin([a_norm, b_norm])
ratio = np.max([a_norm, b_norm]) / np.min([a_norm, b_norm])
ratio_array[min_ind] = ratio
return ratio_array
def _get_square(self, a, b):
midpoint = 0.5 * (a + b)
a_inter = self._get_intercept(midpoint, a)
b_inter = self._get_intercept(midpoint, b)
a_len = np.linalg.norm(a_inter - midpoint)
b_len = np.linalg.norm(b_inter - midpoint)
r = np.min([a_len, b_len])
box_length = (2 * r) / np.sqrt(2)
return box_length, midpoint
def _get_scaling_matrix(self, a, b, scan_size=40):
final_box_length = 0
final_midpoint = np.zeros(2)
ratio = self._get_ratio(a, b)
scaling_matrix = np.ones(2)
# Dynamic Neural Field simulation
# Copyright (c) 2017 <NAME>
'''
Dynamic neural field
====================
This script implements the numerical integration of dynamic neural field of the
form:
τ ∂U(x,t)/∂t = -U(x,t) + ∫_{-∞}^{+∞} w(|x-y|)·f(U(y,t)) dy + I(x,t) + h
where U(x,t) is the potential of a neural population at position x and time t
w(d) is a neighborhood function from ℝ⁺ → ℝ
f(x) is the firing rate of a single neuron from ℝ → ℝ
I(x,t) is the input at position x and time t
h is the resting potential
τ is the temporal decay of the synapse
References:
http://www.scholarpedia.org/article/Neural_fields
'''
import numpy as np
import scipy.linalg
from scipy.ndimage.filters import convolve
def gaussian(n=40, center=(0,0), sigma=0.1):
xmin, xmax = -1, +1
ymin, ymax = -1, +1
x0, y0 = center
X, Y = np.meshgrid(np.linspace(xmin-x0, xmax-x0, n, endpoint=True),
np.linspace(ymin-y0, ymax-y0, n, endpoint=True))
D = X*X+Y*Y
return np.exp(-0.5*D/sigma**2)
def convolve1d(Z, K):
# return convolve(Z, K, mode='constant')
R = np.convolve(Z, K, 'same')
i0 = 0
if R.shape[0] > Z.shape[0]:
i0 = (R.shape[0]-Z.shape[0])//2 + 1 - Z.shape[0]%2
i1 = i0 + Z.shape[0]
return R[i0:i1]
def convolve2d(Z, K, USV = None):
epsilon = 1e-9
if USV is None:
U,S,V = scipy.linalg.svd(K)
U,S,V = U.astype(K.dtype), S.astype(K.dtype), V.astype(K.dtype)
else:
U,S,V = USV
n = (S > epsilon).sum()
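    # Keep only the numerically significant singular values: each retained
    # rank-1 term U[:, k]*S[k]*V[k, :] acts as a separable pair of 1-D
    # convolutions (columns then rows), far cheaper than a full 2-D
    # convolution.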
R = np.zeros(Z.shape)
#
# Last modified on Thu Jan 31 10:27:11 PST 2002 by lindy
#
# $Header: /opt/cvs/python/packages/share1.5/mglutil/math/rmsdtest.py,v 1.4.12.1 2016/02/11 23:15:05 annao Exp $
#
"""Unit test for rmsd.py
Requirements for rmsd:
A. RMSDCalculator.__init__
0. should ..
B. RMSDCalculator.setRefCoords
0. should ..
C. RMSDCalculator.computeRMSD
1. should return known result with known input
2. raise ValueError for input of unlike dimensions
3. for two random sets of points, rmsd(x,y) == rmsd(y,x)
4. raise ValueError if the reference coords have not been set
D.
"""
from mglutil.math import rmsd
import unittest, math
import numpy
from numpy import random as RandomArray
class ComputedValues(unittest.TestCase):
decimals = 4 # decimal places to round to for float comparison
point_list_0 = numpy.zeros((5,3))
point_list_1 = numpy.ones( (5,3))
knowValues = ( (point_list_0, point_list_0, 0.0),
(point_list_1, point_list_1, 0.0),
(point_list_0, point_list_1, math.sqrt(3.0)),
(point_list_1, point_list_0, math.sqrt(3.0)))
def test_computeRMSD_KnowValues(self):
"""1. should return known result with known input"""
for ref, input, known in self.knowValues:
self.assertEqual(known,
rmsd.RMSDCalculator(ref).computeRMSD(input))
def test_computeRMSD_RandomOffset(self):
"""5. offset point by random value returns offset*sqrt(3)"""
min = -10000.
max = 10000.
num_points = 20
dimension = 3
point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
delta = point_list_1[0][0]
point_list_2 = point_list_1 + delta
answer = rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2)
self.assertEqual(
round(answer, self.decimals),
round(abs(delta)*math.sqrt(3.0), self.decimals))
def test_computeRMSD_Random(self):
"""3. for two random sets of points, rmsd(x,y) == rmsd(y,x)"""
min = -10000.
max = 10000.
num_points = 20
dimension = 3
point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
point_list_2 = RandomArray.uniform(min, max, (num_points, dimension))
self.assertEqual(
rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2),
rmsd.RMSDCalculator(point_list_2).computeRMSD(point_list_1))
class InputValues(unittest.TestCase):
point_list_0 = numpy.zeros((3,3))
from __future__ import print_function
import gc, os, sys
import numpy as np
import scipy as sp
import numpy.linalg as la
import scipy.linalg as sla
from numpy.linalg import norm
from time import time
from copy import deepcopy
from warnings import warn
from time import time
from Florence.FiniteElements.Assembly import Assemble
from Florence import Mesh
from Florence.PostProcessing import PostProcess
from .StructuralDynamicIntegrator import StructuralDynamicIntegrator
__all__ = ["NonlinearImplicitStructuralDynamicIntegrator", "LinearImplicitStructuralDynamicIntegrator"]
class NonlinearImplicitStructuralDynamicIntegrator(StructuralDynamicIntegrator):
"""Implicit dynamic solver for nonlinear problems based on Newmark's beta
"""
def __init__(self):
super(NonlinearImplicitStructuralDynamicIntegrator, self).__init__()
self.gamma = 0.5
self.beta = 0.25
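        # gamma = 1/2, beta = 1/4 is Newmark's average-acceleration scheme:
        # second-order accurate and unconditionally stable for linear
        # problems, hence a common default for implicit structural dynamics.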
def Solver(self, function_spaces, formulation, solver,
K, M, NeumannForces, NodalForces, Residual,
mesh, TotalDisp, Eulerx, Eulerp, material, boundary_condition, fem_solver):
# COMPUTE DAMPING MATRIX BASED ON MASS
D = 0.0
if fem_solver.include_physical_damping:
D = fem_solver.damping_factor*M
# GET BOUNDARY CONDITIONS INFORMATION
self.GetBoundaryInfo(mesh, formulation, boundary_condition)
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
# INITIALISE VELOCITY AND ACCELERATION
velocities = np.zeros((mesh.points.shape[0],formulation.ndim))
accelerations = np.zeros((mesh.points.shape[0],formulation.ndim))
# COMPUTE INITIAL ACCELERATION FOR TIME STEP 0
if NeumannForces.ndim == 2 and NeumannForces.shape[1]>1:
InitResidual = Residual - NeumannForces[:,0][:,None]
else:
InitResidual = Residual
if formulation.fields == "electro_mechanics":
accelerations[:,:] = solver.Solve(M_mech, -InitResidual[self.mechanical_dofs].ravel()
).reshape(mesh.points.shape[0],formulation.ndim)
else:
accelerations[:,:] = solver.Solve(M, -InitResidual.ravel() ).reshape(mesh.points.shape[0],formulation.ndim)
self.NRConvergence = fem_solver.NRConvergence
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/LoadIncrement
AppliedDirichletInc = np.zeros(boundary_condition.applied_dirichlet.shape[0],dtype=np.float64)
save_counter = 1
nincr_last = float(LoadIncrement-1) if LoadIncrement !=1 else 1
if boundary_condition.compound_dirichlet_bcs:
ChangedTotalDisp = np.zeros((mesh.nnode, formulation.nvar))
# TIME LOOP
for Increment in range(1,LoadIncrement):
t_increment = time()
# GET INCREMENTAL DIRICHLET BC
if not boundary_condition.has_step_wise_dirichlet_loading:
if boundary_condition.applied_dirichlet.ndim == 2:
AppliedDirichletInc = boundary_condition.applied_dirichlet[:,Increment]
else:
if boundary_condition.make_loading == "ramp":
AppliedDirichletInc = boundary_condition.applied_dirichlet*(1.*Increment/LoadIncrement)
else:
AppliedDirichletInc = boundary_condition.applied_dirichlet/nincr_last
else:
boundary_condition.ApplyStepWiseDirichletFunc(formulation, mesh, increment=Increment)
self.GetBoundaryInfo(mesh, formulation, boundary_condition, increment=Increment)
AppliedDirichletInc = boundary_condition.applied_dirichlet
if self.bc_changed_at_this_step and boundary_condition.compound_dirichlet_bcs:
ChangedTotalDisp += np.copy(U)
# GET INCREMENTAL NEUMANN BC
if not boundary_condition.has_step_wise_neumann_loading:
if NeumannForces.ndim == 2 and NeumannForces.shape[1]>1:
NodalForces = NeumannForces[:,Increment][:,None]
else:
if boundary_condition.make_loading == "ramp":
NodalForces = NeumannForces*(1.*Increment/LoadIncrement)
else:
NodalForces = NeumannForces/nincr_last
else:
NodalForces = boundary_condition.ApplyStepWiseNeumannFunc(formulation, mesh,
material, increment=Increment)
NodalForces = NodalForces.ravel()[:,None]
# OBTAIN INCREMENTAL RESIDUAL - CONTRIBUTION FROM BOTH NEUMANN AND DIRICHLET
# OLD WAY - RESIDUAL WAS GETTING CARRIED OVER FROM PREV NR STEP BUT AT THIS
# POINT IT WAS TINY (AS NR HAD CONVERGED) THAT IT DIDN'T MATTER AND WORKED AS EXPECTED
# Residual = -boundary_condition.ApplyDirichletGetReducedMatrices(K,Residual,
# AppliedDirichletInc,LoadFactor=1.0,mass=M,only_residual=True)
# ACTUAL WAY
Residual = -boundary_condition.ApplyDirichletGetReducedMatrices(K,np.zeros_like(Residual),
AppliedDirichletInc,LoadFactor=1.0,mass=M,only_residual=True)
Residual -= NodalForces
# COMPUTE INITIAL ACCELERATION - ONLY NEEDED IN CASES OF PRESTRETCHED CONFIGURATIONS
# accelerations[:,:] = solver.Solve(M, Residual.ravel() - \
# K.dot(TotalDisp[:,:,Increment].ravel())).reshape(mesh.points.shape[0],formulation.nvar)
# LET NORM OF THE FIRST RESIDUAL BE THE NORM WITH RESPECT TO WHICH WE
# HAVE TO CHECK THE CONVERGENCE OF NEWTON RAPHSON. TYPICALLY THIS IS
# NORM OF NODAL FORCES
if Increment==1:
self.NormForces = np.linalg.norm(Residual)
# AVOID DIVISION BY ZERO
if np.isclose(self.NormForces,0.0):
self.NormForces = 1e-14
self.norm_residual = np.linalg.norm(Residual)/self.NormForces
Eulerx, Eulerp, K, Residual, velocities, accelerations = self.NewtonRaphson(function_spaces, formulation, solver,
Increment, K, D, M, NodalForces, Residual, mesh, Eulerx, Eulerp,
material,boundary_condition,AppliedDirichletInc, fem_solver, velocities, accelerations)
# UPDATE DISPLACEMENTS FOR THE CURRENT LOAD INCREMENT
U = np.zeros((mesh.points.shape[0], formulation.nvar))
U[:,:formulation.ndim] = Eulerx - mesh.points
if formulation.fields == "electro_mechanics":
U[:,-1] = Eulerp
# SAVE RESULTS
if Increment % fem_solver.save_frequency == 0 or\
(Increment == LoadIncrement - 1 and save_counter<TotalDisp.shape[2]):
TotalDisp[:,:,save_counter] = U
if boundary_condition.compound_dirichlet_bcs:
TotalDisp[:,:,save_counter] += ChangedTotalDisp
save_counter += 1
# COMPUTE DISSIPATION OF ENERGY THROUGH TIME
if fem_solver.compute_energy_dissipation:
energy_info = self.ComputeEnergyDissipation(function_spaces[0], mesh, material, formulation, fem_solver,
Eulerx, U, NodalForces, M, velocities)
formulation.energy_dissipation.append(energy_info[0])
formulation.internal_energy.append(energy_info[1])
formulation.kinetic_energy.append(energy_info[2])
formulation.external_energy.append(energy_info[3])
# COMPUTE DISSIPATION OF LINEAR MOMENTUM THROUGH TIME
if fem_solver.compute_linear_momentum_dissipation:
power_info = self.ComputePowerDissipation(function_spaces[0], mesh, material, formulation, fem_solver,
Eulerx, U, NodalForces, M, velocities, accelerations)
formulation.power_dissipation.append(power_info[0])
formulation.internal_power.append(power_info[1])
formulation.kinetic_power.append(power_info[2])
formulation.external_power.append(power_info[3])
# LOG IF ASKED FOR
self.LogSave(fem_solver, formulation, U[:,:formulation.ndim], Eulerp, Increment)
print('\nFinished Load increment', Increment, 'in', time()-t_increment, 'seconds')
try:
print('Norm of Residual is',
np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces), '\n')
except RuntimeWarning:
print("Invalid value encountered in norm of Newton-Raphson residual")
# STORE THE INFORMATION IF NEWTON-RAPHSON FAILS
if fem_solver.newton_raphson_failed_to_converge:
solver.condA = np.nan
TotalDisp = TotalDisp[:,:,:save_counter-1]
fem_solver.number_of_load_increments = save_counter - 1
break
# BREAK AT A SPECIFICED LOAD INCREMENT IF ASKED FOR
if fem_solver.break_at_increment != -1 and fem_solver.break_at_increment is not None:
if fem_solver.break_at_increment == Increment:
if fem_solver.break_at_increment < LoadIncrement - 1:
print("\nStopping at increment {} as specified\n\n".format(Increment))
TotalDisp = TotalDisp[:,:,:save_counter]
fem_solver.number_of_load_increments = save_counter
break
if fem_solver.save_frequency != 1:
if TotalDisp.shape[2] > save_counter:
# IN CASE SOLVER BLEW UP
TotalDisp = TotalDisp[:,:,:save_counter]
fem_solver.number_of_load_increments = TotalDisp.shape[2]
else:
fem_solver.number_of_load_increments = save_counter
return TotalDisp
def NewtonRaphson(self, function_spaces, formulation, solver,
Increment, K, D, M, NodalForces, Residual, mesh, Eulerx, Eulerp, material,
boundary_condition, AppliedDirichletInc, fem_solver, velocities, accelerations):
Tolerance = fem_solver.newton_raphson_tolerance
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/fem_solver.number_of_load_increments
Iter = 0
self.iterative_norm_history = []
# EulerxPrev = np.copy(Eulerx)
# EulerVPrev = np.copy(velocities[:,:,Increment-1])
# EulerAPrev = np.copy(accelerations[:,:,Increment-1])
# PREDICTOR STEP
tmpV = (1. - self.gamma/self.beta)*velocities + (1. - self.gamma/2./self.beta)*LoadFactor*accelerations
tmpA = (-1./self.beta/LoadFactor)*velocities - (1./2./self.beta)*(1.- 2.*self.beta)*accelerations
velocities = tmpV
accelerations = tmpA
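        # Newmark-beta relations with dt = LoadFactor:
        #   u_{n+1} = u_n + dt*v_n + dt^2*((1/2 - beta)*a_n + beta*a_{n+1})
        #   v_{n+1} = v_n + dt*((1 - gamma)*a_n + gamma*a_{n+1})
        # The predictor above rearranges these with the unknown increment
        # dU = u_{n+1} - u_n factored out; each solve in the loop below then
        # adds the corrector terms (gamma/beta/dt)*dU and (1/beta/dt^2)*dU.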
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
InertiaResidual = np.zeros((Residual.shape[0],1))
InertiaResidual[self.mechanical_dofs,0] = M_mech.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
InertiaResidual[self.mechanical_dofs,0] += D_mech.dot(velocities.ravel())
else:
InertiaResidual = np.zeros((Residual.shape[0],1))
InertiaResidual[:,0] = M.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[:,0] += D.dot(velocities.ravel())
Residual[boundary_condition.columns_in] += InertiaResidual[boundary_condition.columns_in]
# APPLY INCREMENTAL DIRICHLET PER LOAD STEP (THIS IS INCREMENTAL NOT ACCUMULATIVE)
IncDirichlet = boundary_condition.UpdateFixDoFs(AppliedDirichletInc,
K.shape[0],formulation.nvar)
# UPDATE EULERIAN COORDINATE
# Eulerx += IncDirichlet[:,:formulation.ndim]
Eulerx[:,:] = mesh.points + IncDirichlet[:,:formulation.ndim]
Eulerp[:] = IncDirichlet[:,-1] # ENSURES Eulerp IS CONTIGUOUS - NECESSARY FOR LOW-LEVEL DISPATCHER
while np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces) > Tolerance or Iter==0:
# GET EFFECTIVE STIFFNESS
# K += (1./self.beta/LoadFactor**2)*M
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# GET THE REDUCED SYSTEM OF EQUATIONS
K_b, F_b, _ = boundary_condition.GetReducedMatrices(K,Residual)
# SOLVE THE SYSTEM
sol = solver.Solve(K_b,-F_b)
# GET ITERATIVE SOLUTION
dU = boundary_condition.UpdateFreeDoFs(sol,K.shape[0],formulation.nvar)
# UPDATE THE EULERIAN COMPONENTS
# UPDATE THE GEOMETRY
Eulerx += dU[:,:formulation.ndim]
# GET ITERATIVE ELECTRIC POTENTIAL
Eulerp += dU[:,-1]
# UPDATE VELOCITY AND ACCELERATION
velocities += self.gamma/self.beta/LoadFactor*dU[:,:formulation.ndim]
accelerations += 1./self.beta/LoadFactor**2*dU[:,:formulation.ndim]
# OR ALTERNATIVELY
# dumA = 1./self.beta/LoadFactor**2*(Eulerx - EulerxPrev) -\
# 1./self.beta/LoadFactor*(EulerVPrev) -\
# 1./2./self.beta*(1. - 2.*self.beta)*(EulerAPrev)
# dumV = (1. - self.gamma/self.beta)*(EulerVPrev) +\
# (1. - self.gamma/2./self.beta)*LoadFactor*(EulerAPrev) +\
# self.gamma/self.beta/LoadFactor*(Eulerx - EulerxPrev)
# velocities = dumV
# accelerations = dumA
# RE-ASSEMBLE - COMPUTE STIFFNESS AND INTERNAL TRACTION FORCES
K, TractionForces, _, _ = Assemble(fem_solver,function_spaces[0], formulation, mesh, material,
Eulerx, Eulerp)
# FIND INITIAL RESIDUAL
if formulation.fields == "electro_mechanics":
InertiaResidual = np.zeros((TractionForces.shape[0],1))
InertiaResidual[self.mechanical_dofs,0] = M_mech.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[self.mechanical_dofs,0] += D_mech.dot(velocities.ravel())
else:
InertiaResidual = np.zeros((TractionForces.shape[0],1))
InertiaResidual[:,0] = M.dot(accelerations.ravel())
if fem_solver.include_physical_damping:
InertiaResidual[:,0] += D.dot(velocities.ravel())
# UPDATE RESIDUAL
Residual[boundary_condition.columns_in] = TractionForces[boundary_condition.columns_in] \
- NodalForces[boundary_condition.columns_in] + InertiaResidual[boundary_condition.columns_in]
# SAVE THE NORM
self.abs_norm_residual = la.norm(Residual[boundary_condition.columns_in])
if Iter==0:
self.NormForces = la.norm(Residual[boundary_condition.columns_in])
self.norm_residual = np.abs(la.norm(Residual[boundary_condition.columns_in])/self.NormForces)
# SAVE THE NORM
self.NRConvergence['Increment_'+str(Increment)] = np.append(self.NRConvergence['Increment_'+str(Increment)],\
self.norm_residual)
print("Iteration {} for increment {}.".format(Iter, Increment) +\
" Residual (abs) {0:>16.7g}".format(self.abs_norm_residual),
"\t Residual (rel) {0:>16.7g}".format(self.norm_residual))
# BREAK BASED ON RELATIVE NORM
if np.abs(self.abs_norm_residual) < Tolerance:
break
# BREAK BASED ON INCREMENTAL SOLUTION - KEEP IT AFTER UPDATE
if norm(dU) <= fem_solver.newton_raphson_solution_tolerance:
print("Incremental solution within tolerance i.e. norm(dU): {}".format(norm(dU)))
break
# UPDATE ITERATION NUMBER
Iter +=1
if Iter==fem_solver.maximum_iteration_for_newton_raphson and formulation.fields == "electro_mechanics":
raise StopIteration("\n\nNewton Raphson did not converge! Maximum number of iterations reached.")
if Iter==fem_solver.maximum_iteration_for_newton_raphson:
fem_solver.newton_raphson_failed_to_converge = True
break
if np.isnan(self.norm_residual) or self.norm_residual>1e06:
fem_solver.newton_raphson_failed_to_converge = True
break
# IF BREAK WHEN NEWTON RAPHSON STAGNATES IS ACTIVATED
if fem_solver.break_at_stagnation:
self.iterative_norm_history.append(self.norm_residual)
if Iter >= 5 and self.abs_norm_residual<1e06:
if np.mean(self.iterative_norm_history) < 1.:
break
# USER DEFINED CRITERIA TO BREAK OUT OF NEWTON-RAPHSON
if fem_solver.user_defined_break_func is not None:
if fem_solver.user_defined_break_func(Increment,Iter,self.norm_residual,self.abs_norm_residual, Tolerance):
break
# USER DEFINED CRITERIA TO STOP NEWTON-RAPHSON AND THE WHOLE ANALYSIS
if fem_solver.user_defined_stop_func is not None:
if fem_solver.user_defined_stop_func(Increment,Iter,self.norm_residual,self.abs_norm_residual, Tolerance):
fem_solver.newton_raphson_failed_to_converge = True
break
return Eulerx, Eulerp, K, Residual, velocities, accelerations
#------------------------------------------ LINEAR IMPLICIT SOLVER ----------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------#
class LinearImplicitStructuralDynamicIntegrator(StructuralDynamicIntegrator):
"""Implicit dynamic solver for linear problems based on Newmark's beta
"""
def __init__(self,**kwargs):
super(LinearImplicitStructuralDynamicIntegrator, self).__init__()
self.lump_rhs = False
self.gamma = 0.5
self.beta = 0.25
def Solver(self, function_spaces, formulation, solver,
K, M, NeumannForces, NodalForces, Residual,
mesh, TotalDisp, Eulerx, Eulerp, material, boundary_condition, fem_solver):
# CHECK FORMULATION
if formulation.fields != "mechanics" and formulation.fields != "electro_mechanics":
raise NotImplementedError("Linear implicit solver for {} is not available".format(formulation.fields))
if formulation.fields == "electro_mechanics":
warn("Linear implicit solver for electromechanics formulation is not thoroughly checked and may return incorrect results. "
"Please use nonlinear explicit dynamic solver instead")
# GET BOUNDARY CONDITIONS INFORMATION
self.GetBoundaryInfo(mesh, formulation, boundary_condition)
LoadIncrement = fem_solver.number_of_load_increments
LoadFactor = fem_solver.total_time/LoadIncrement
post_process = PostProcess(formulation.ndim,formulation.nvar)
post_process.SetAnalysis(analysis_type=fem_solver.analysis_type, analysis_nature=fem_solver.analysis_nature)
if NeumannForces.ndim == 2 and NeumannForces.shape[1]==1:
tmp = np.zeros((NeumannForces.shape[0],LoadIncrement))
tmp[:,0] = NeumannForces[:,0]
NeumannForces = tmp
dU = boundary_condition.UpdateFixDoFs(boundary_condition.applied_dirichlet[:,0],
mesh.points.shape[0]*formulation.nvar, formulation.nvar)
TotalDisp[:,:formulation.nvar,0] = dU
# INITIALISE VELOCITY AND ACCELERATION
velocities = np.zeros((mesh.points.shape[0]*formulation.ndim))
accelerations = np.zeros((mesh.points.shape[0]*formulation.ndim))
# COMPUTE DAMPING MATRIX BASED ON MASS
D = 0.0
if fem_solver.include_physical_damping:
D = fem_solver.damping_factor*M
if formulation.fields == "electro_mechanics":
M_mech = M[self.mechanical_dofs,:][:,self.mechanical_dofs]
if fem_solver.include_physical_damping:
D_mech = D[self.mechanical_dofs,:][:,self.mechanical_dofs]
else:
M_mech = M
D_mech = D
# COMPUTE INITIAL ACCELERATION FOR TIME STEP 0
Residual = np.zeros_like(Residual)
InitResidual = Residual + NeumannForces[:,0][:,None]
if formulation.fields == "electro_mechanics":
accelerations[:] = solver.Solve(M_mech, -InitResidual[self.mechanical_dofs].ravel())
else:
accelerations[:] = solver.Solve(M, InitResidual.ravel() )
# COMPUTE AUGMENTED K (INCLUDES INERTIA EFFECT)
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# GET REDUCED VARIABLES
K_b, F_b, _ = boundary_condition.GetReducedMatrices(K,Residual)
if self.lump_rhs:
M_mech = M_mech.sum(axis=1).A.ravel() # FOR CSR
# M_mech = M_mech.sum(axis=0).ravel() # FOR CSC
if fem_solver.include_physical_damping:
D_mech = D_mech.sum(axis=1).A.ravel()
reuse_factorisation = False if formulation.fields == "electro_mechanics" else True
for Increment in range(1,LoadIncrement):
t_increment=time()
# FIXED INCREMENTAL DIRICHLET
AppliedDirichletInc = boundary_condition.applied_dirichlet[:,Increment-1]
# APPLY NEUMANN BOUNDARY CONDITIONS
DeltaF = NeumannForces[:,Increment][:,None]
NodalForces = DeltaF
# ACCUMULATED FORCE
if fem_solver.include_physical_damping:
if self.lump_rhs:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(1./self.beta/LoadFactor)*M_mech*velocities + (0.5/self.beta - 1.)*M_mech*accelerations +\
(self.gamma/self.beta/LoadFactor)*D_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(self.gamma/self.beta - 1.)*D_mech*velocities -\
LoadFactor*((1-self.gamma)-self.gamma*(0.5/self.beta - 1.))*D_mech*accelerations
else:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(1./self.beta/LoadFactor)*M_mech.dot(velocities) + (0.5/self.beta - 1.)*M_mech.dot(accelerations) +\
(self.gamma/self.beta/LoadFactor)*D_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(self.gamma/self.beta - 1.)*D_mech.dot(velocities) -\
LoadFactor*((1-self.gamma)-self.gamma*(0.5/self.beta - 1.))*D_mech.dot(accelerations)
else:
if self.lump_rhs:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech*TotalDisp[:,:formulation.ndim,Increment-1].ravel() +\
(1./self.beta/LoadFactor)*M_mech*velocities + (0.5/self.beta - 1.)*M_mech*accelerations
else:
Residual[self.mechanical_dofs,0] = (1./self.beta/LoadFactor**2)*M_mech.dot(TotalDisp[:,:formulation.ndim,Increment-1].ravel()) +\
(1./self.beta/LoadFactor)*M_mech.dot(velocities) + (0.5/self.beta - 1.)*M_mech.dot(accelerations)
Residual += DeltaF
if formulation.fields == "electro_mechanics":
K = Assemble(fem_solver,function_spaces[0], formulation, mesh, material, Eulerx, Eulerp)[0]
K += (self.gamma/self.beta/LoadFactor)*D + (1./self.beta/LoadFactor**2)*M
# CHECK CONTACT AND ASSEMBLE IF DETECTED
if fem_solver.has_contact:
Eulerx = mesh.points + TotalDisp[:,:formulation.ndim,Increment-1]
TractionForcesContact = np.zeros_like(Residual)
import numpy as np
import copy
from .math_tools import rpy2Mat
import time
from itertools import permutations
class BasicGraspingPrimitive():
def __init__(self, kinematics, params, object):
self.kinematics = kinematics
self.params = params
self.object = object
## viapoints
self.viapoints = None
self.goal_target = None
# Reference variable
## Variable to measure primitive progress and satisfy
self.error = 9999.
self.error_matrix = np.ones((3,3))*999.
self.d_error = 9999.
self.error_thrs = 0.003
self.d_error_thrs = 0.0001
self.v_thrs = 0.0001
self.DEBUG = True
def start(self, cube_state, des_cube_state, robot_state):
self.viapoints = self.get_grasping_viapoints(cube_state , robot_state)
def combinatorial_matching(self, fingers_poses, cube_targets):
'Cube targets are provided as a list of candidates; find the optimal candidate and finger-to-corner matching by exhaustive search over all permutations.'
best_desired_poses = cube_targets[0]
best_perm = [0,1,2]
d_error = 1000000000
perm = list(permutations([0,1,2]))  # materialize: a bare generator would be exhausted after the first candidate
for cube_target in cube_targets:
for idx in perm:
er = np.mean((fingers_poses - cube_target[:,idx])**2)
if er< d_error:
d_error = er
best_desired_poses = cube_target
best_perm = idx
print(best_perm)
return best_desired_poses[:,best_perm], best_perm, best_desired_poses
def get_grasping_viapoints(self, cube_state , robot_state, center=True):
## Set State ##
cube_pose = cube_state[0]
robot_xyz = np.stack(robot_state[2])
x_off = 1.4
pos_f_c = np.array([[x_off,0.,0.],[-x_off,0.5,0.],[-x_off,-0.5,0.],[-x_off,0.,0.],[x_off,-0.5,0.],[x_off,0.5,0.],
[0.,0.,x_off],[0.,0.5,-x_off],[0.,-0.5,-x_off],[0.,0.,-x_off],[0.,-0.5,x_off],[0,0.5,x_off]]).T
pos_f_w = self.object.in_world(cube_state=cube_state, pos_c = pos_f_c, offset_prop = True)
pos_f_candidates = [pos_f_w[:,:3], pos_f_w[:,3:6],pos_f_w[:,6:9], pos_f_w[:,9:]]
candidates_clean = []
for candidate in pos_f_candidates:
check = np.sum(candidate[2,:]<0.)
if check<1:
candidates_clean.append(candidate)
if len(candidates_clean)>0:
self.goal_target, _, __ = self.combinatorial_matching(fingers_poses=robot_xyz, cube_targets=candidates_clean)
#print('fingers poses in world frame: ', goal_target)
#time.sleep(60)
return [self.goal_target]
def is_satisfied(self, cube_state, des_cube_state, robot_state):
robot_state_np = np.stack((robot_state[2][0], robot_state[2][1], robot_state[2][2]), 1)
current_error_matrix = (self.goal_target - robot_state_np) ** 2
current_error = np.mean(current_error_matrix)
self.d_error = np.abs(self.error - current_error)
self.error_matrix = current_error_matrix
self.error = current_error
if self.DEBUG:
print('Current Goal is:', self.goal_target)
print('Current Robot State in XYZ is:', robot_state[2])
print('current_error is ', current_error)
print('cube state is:', cube_state[2])
return False
def compute_goal_pose(self, cube_state, des_cube_state, robot_state):
self.viapoints = self.get_grasping_viapoints(cube_state , robot_state)
target = self.viapoints[0]
#target = np.stack(robot_state[2])
#print('Viapoint is:', target)
self.goal_target = target
#print(target)
return target
def grasping_gains(self):
pass
def unit_vector(self, vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def skew(self, vector):
"""
this function returns a numpy array with the skew symmetric cross product matrix for vector.
the skew symmetric cross product matrix is defined such that
np.cross(a, b) = np.dot(skew(a), b)
:param vector: An array like vector to create the skew symmetric cross product matrix for
:return: A numpy array of the skew symmetric cross product vector
"""
return np.array([[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]])
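    # Hedged example: for vector = [1, 0, 0], skew(vector) gives
    # [[0, 0, 0], [0, 0, -1], [0, 1, 0]], so that skew(a) @ b == np.cross(a, b).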
def assign_y(self, edge_arr, p0, p1, p2, direction):
asign1, cent1, dist1 = self.assign_y_internal(edge_arr, p0, p1, p2, direction)
asign2, cent2, dist2 = self.assign_y_internal(edge_arr, p0, p2, p1, direction)
if (dist1 <= dist2):
return asign1, cent1
else:
# print ("before" + str(asign2))
asign2[1:] = np.flipud(asign2[1:])
# print("after" + str(asign2))
cent2[1:] = np.flipud(cent2[1:])
return asign2, cent2
def assign_y_internal(self, edge_arr, p0, p1, p2, direction):
max_dist = 1
add_grasp1 = max_dist - 1
add_grasp2 = max_dist + 1
if (add_grasp1 > 3):
add_grasp1 = 0
if (add_grasp1 < 0):
add_grasp1 = 3
if (add_grasp2 > 3):
add_grasp2 = 0
if (add_grasp2 < 0):
add_grasp2 = 3
pot_ret = [[max_dist, add_grasp1, add_grasp2], [add_grasp2, max_dist, add_grasp1],
[add_grasp1, add_grasp2, max_dist]]
cent = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
config1 = np.sum((edge_arr[:, max_dist] - p0) ** 2 + (edge_arr[:, add_grasp1] - p1) ** 2 + (
edge_arr[:, add_grasp2] - p2) ** 2)
config2 = np.sum((edge_arr[:, max_dist] - p1) ** 2 + (edge_arr[:, add_grasp1] - p2) ** 2 + (
edge_arr[:, add_grasp2] - p0) ** 2)
config3 = np.sum((edge_arr[:, max_dist] - p2) ** 2 + (edge_arr[:, add_grasp1] - p0) ** 2 + (
edge_arr[:, add_grasp2] - p1) ** 2)
dist_arr = [config1, config2, config3]
dec = np.argmin([config1, config2, config3])
dist_1 = copy.deepcopy(dist_arr[dec])
pot_ret_1 = copy.deepcopy(pot_ret[dec])
cent_1 = copy.deepcopy(cent[dec])
max_dist = 3
add_grasp1 = max_dist - 1
add_grasp2 = max_dist + 1
if (add_grasp1 > 3):
add_grasp1 = 0
if (add_grasp1 < 0):
add_grasp1 = 3
if (add_grasp2 > 3):
add_grasp2 = 0
if (add_grasp2 < 0):
add_grasp2 = 3
pot_ret = [[max_dist, add_grasp1, add_grasp2], [add_grasp2, max_dist, add_grasp1],
[add_grasp1, add_grasp2, max_dist]]
cent = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
config1 = np.sum((edge_arr[:, max_dist] - p0) ** 2 + (edge_arr[:, add_grasp1] - p1) ** 2 + (
edge_arr[:, add_grasp2] - p2) ** 2)
config2 = np.sum((edge_arr[:, max_dist] - p1) ** 2 + (edge_arr[:, add_grasp1] - p2) ** 2 + (
edge_arr[:, add_grasp2] - p0) ** 2)
config3 = np.sum((edge_arr[:, max_dist] - p2) ** 2 + (edge_arr[:, add_grasp1] - p0) ** 2 + (
edge_arr[:, add_grasp2] - p1) ** 2)
dist_arr = [config1, config2, config3]
dec = np.argmin([config1, config2, config3])
dist_2 = copy.deepcopy(dist_arr[dec])
pot_ret_2 = copy.deepcopy(pot_ret[dec])
cent_2 = copy.deepcopy(cent[dec])
moment_dir = np.dot(self.skew(self.unit_vector(edge_arr[:, 0]-edge_arr[:, 2])),self.unit_vector(edge_arr[:, 1]-edge_arr[:, 3]))
if (direction==-1 or direction==1):
if (np.sign(moment_dir[2])*np.sign(direction)==1):
return pot_ret_1, cent_1, dist_1
else:
return pot_ret_2, cent_2, dist_2
# if (direction==-1):
# return pot_ret_1, cent_1, dist_1
# elif (direction==1):
# return pot_ret_2, cent_2, dist_2
if (dist_1 < dist_2):
return pot_ret_1, cent_1, dist_1
else:
return pot_ret_2, cent_2, dist_2
def assign_x_internal(self, edge_arr, p0, p1, p2, direction):
max_dist = 0
add_grasp1 = max_dist-1
add_grasp2 = max_dist+1
if (add_grasp1>3):
add_grasp1 = 0
if (add_grasp1<0):
add_grasp1 = 3
if (add_grasp2>3):
add_grasp2 = 0
if (add_grasp2<0):
add_grasp2 = 3
pot_ret = [[max_dist,add_grasp1,add_grasp2],[add_grasp2,max_dist,add_grasp1],[add_grasp1,add_grasp2,max_dist]]
cent = [[1,0,0],[0,1,0],[0,0,1]]
config1 = np.sum((edge_arr[:,max_dist]-p0)**2+(edge_arr[:,add_grasp1]-p1)**2+(edge_arr[:,add_grasp2]-p2)**2)
config2 = np.sum((edge_arr[:,max_dist]-p1)**2+(edge_arr[:,add_grasp1]-p2)**2+(edge_arr[:,add_grasp2]-p0)**2)
config3 = np.sum((edge_arr[:,max_dist]-p2)**2+(edge_arr[:,add_grasp1]-p0)**2+(edge_arr[:,add_grasp2]-p1)**2)
dist_arr = [config1, config2, config3]
dec = np.argmin([config1, config2, config3])
dist_1 = copy.deepcopy(dist_arr[dec])
pot_ret_1 = copy.deepcopy(pot_ret[dec])
cent_1 = copy.deepcopy(cent[dec])
max_dist = 2
add_grasp1 = max_dist-1
add_grasp2 = max_dist+1
if (add_grasp1>3):
add_grasp1 = 0
if (add_grasp1<0):
add_grasp1 = 3
if (add_grasp2>3):
add_grasp2 = 0
if (add_grasp2<0):
add_grasp2 = 3
pot_ret = [[max_dist,add_grasp1,add_grasp2],[add_grasp2,max_dist,add_grasp1],[add_grasp1,add_grasp2,max_dist]]
cent = [[1,0,0],[0,1,0],[0,0,1]]
config1 = np.sum((edge_arr[:,max_dist]-p0)**2+(edge_arr[:,add_grasp1]-p1)**2+(edge_arr[:,add_grasp2]-p2)**2)
config2 = np.sum((edge_arr[:,max_dist]-p1)**2+(edge_arr[:,add_grasp1]-p2)**2+(edge_arr[:,add_grasp2]-p0)**2)
config3 = np.sum((edge_arr[:,max_dist]-p2)**2+(edge_arr[:,add_grasp1]-p0)**2+(edge_arr[:,add_grasp2]-p1)**2)
dist_arr = [config1, config2, config3]
dec = np.argmin([config1, config2, config3])
dist_2 = copy.deepcopy(dist_arr[dec])
pot_ret_2 = copy.deepcopy(pot_ret[dec])
cent_2 = copy.deepcopy(cent[dec])
moment_dir = np.dot(self.skew(self.unit_vector(edge_arr[:, 1]-edge_arr[:, 3])),self.unit_vector(edge_arr[:, 0]-edge_arr[:, 2]))
if (direction==-1 or direction==1):
if (np.sign(moment_dir[2])*np.sign(direction)==1):
return pot_ret_1, cent_1, dist_1
else:
return pot_ret_2, cent_2, dist_2
# if (direction==1):
# return pot_ret_1, cent_1, dist_1
# elif (direction==-1):
# return pot_ret_2, cent_2, dist_2
if (dist_1<dist_2):
return pot_ret_1, cent_1, dist_1
else:
return pot_ret_2, cent_2, dist_2
def assign_x(self, edge_arr, p0, p1, p2, direction):
asign1, cent1, dist1 = self.assign_x_internal(edge_arr, p0, p1, p2, direction)
asign2, cent2, dist2 = self.assign_x_internal(edge_arr, p0, p2, p1, direction)
if (dist1<=dist2):
return asign1, cent1
else:
asign2[1:] = np.flipud(asign2[1:])
cent2[1:] = np.flipud(cent2[1:])
return asign2, cent2
def assign_target_old(self, edge_arr, target, p0, p1, p2):
dist_from_target = (np.sum((edge_arr-target.reshape(3,1))**2,axis=0))
max_dist = np.argmax(dist_from_target)
add_grasp1 = max_dist-1
add_grasp2 = max_dist+1
if (add_grasp1>3):
add_grasp1 = 0
if (add_grasp1<0):
add_grasp1 = 3
if (add_grasp2>3):
add_grasp2 = 0
if (add_grasp2<0):
add_grasp2 = 3
pot_ret = [[max_dist,add_grasp1,add_grasp2],[add_grasp2,max_dist,add_grasp1],[add_grasp1,add_grasp2,max_dist]]
cent = [[1,0,0],[0,1,0],[0,0,1]]
config1 = np.sum((edge_arr[:,max_dist]-p0)**2+(edge_arr[:,add_grasp1]-p1)**2+(edge_arr[:,add_grasp2]-p2)**2)
import os
import time
import warnings
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Optional
import numpy as np
import pybullet as p
import pybullet_data
import pybullet_utils.bullet_client as bc
import panda_gym.assets
class PyBullet:
"""Convenient class to use PyBullet physics engine.
Args:
render (bool, optional): Enable rendering. Defaults to False.
n_substeps (int, optional): Number of simulation substeps per step() call. Defaults to 20.
background_color (np.ndarray, optional): The background color as (red, green, blue).
Defaults to np.array([223, 54, 45]).
"""
def __init__(
self, render: bool = False, n_substeps: int = 20, background_color: np.ndarray = np.array([223.0, 54.0, 45.0])
) -> None:
self.background_color = background_color.astype(np.float64) / 255
options = "--background_color_red={} \
--background_color_green={} \
--background_color_blue={}".format(
*self.background_color
)
self.connection_mode = p.GUI if render else p.DIRECT
self.physics_client = bc.BulletClient(connection_mode=self.connection_mode, options=options)
self.physics_client.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
self.physics_client.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0)
self.n_substeps = n_substeps
self.timestep = 1.0 / 500
self.physics_client.setTimeStep(self.timestep)
self.physics_client.resetSimulation()
self.physics_client.setAdditionalSearchPath(pybullet_data.getDataPath())
self.physics_client.setGravity(0, 0, -9.81)
self._bodies_idx = {}
@property
def dt(self):
"""Timestep."""
return self.timestep * self.n_substeps
def step(self) -> None:
"""Step the simulation."""
for _ in range(self.n_substeps):
self.physics_client.stepSimulation()
def close(self) -> None:
"""Close the simulation."""
self.physics_client.disconnect()
def render(
self,
mode: str = "human",
width: int = 720,
height: int = 480,
target_position: np.ndarray = np.zeros(3)
import argparse
import json
import logging
import os
import re
import sys
from asyncio import Future
from decimal import Decimal
from hashlib import md5, sha1
from io import StringIO
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Set,
TYPE_CHECKING,
Text,
Tuple,
Union,
)
import aiohttp
import numpy as np
import rasa.utils.io as io_utils
from aiohttp import InvalidURL
from rasa.constants import DEFAULT_SANIC_WORKERS, ENV_SANIC_WORKERS
# backwards compatibility 1.0.x
# noinspection PyUnresolvedReferences
from rasa.core.lock_store import LockStore, RedisLockStore
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from sanic import Sanic
from sanic.views import CompositionView
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from random import Random
def configure_file_logging(logger_obj: logging.Logger, log_file: Optional[Text]):
if not log_file:
return
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file, encoding=io_utils.DEFAULT_ENCODING)
file_handler.setLevel(logger_obj.level)
file_handler.setFormatter(formatter)
logger_obj.addHandler(file_handler)
def module_path_from_instance(inst: Any) -> Text:
"""Return the module path of an instance's class."""
return inst.__module__ + "." + inst.__class__.__name__
def subsample_array(
arr: List[Any],
max_values: int,
can_modify_incoming_array: bool = True,
rand: Optional["Random"] = None,
) -> List[Any]:
"""Shuffles the array and returns `max_values` number of elements."""
import random
if not can_modify_incoming_array:
arr = arr[:]
if rand is not None:
rand.shuffle(arr)
else:
random.shuffle(arr)
return arr[:max_values]
def is_int(value: Any) -> bool:
"""Checks if a value is an integer.
The type of the value is not important, it might be an int or a float."""
# noinspection PyBroadException
try:
return value == int(value)
except Exception:
return False
def one_hot(hot_idx: int, length: int, dtype: Optional[Text] = None) -> np.array:
if hot_idx >= length:
raise ValueError(
"Can't create one hot. Index '{}' is out "
"of range (length '{}')".format(hot_idx, length)
)
r = np.zeros(length, dtype)
r[hot_idx] = 1
return r
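# Hedged example: one_hot(2, 5) returns array([0., 0., 1., 0., 0.])
# (np.zeros defaults to float64 when dtype is None).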
def str_range_list(start: int, end: int) -> List[Text]:
return [str(e) for e in range(start, end)]
def generate_id(prefix: Text = "", max_chars: Optional[int] = None) -> Text:
import uuid
gid = uuid.uuid4().hex
if max_chars:
gid = gid[:max_chars]
return f"{prefix}{gid}"
def request_input(
valid_values: Optional[List[Text]] = None,
prompt: Optional[Text] = None,
max_suggested: int = 3,
) -> Text:
def wrong_input_message():
print(
"Invalid answer, only {}{} allowed\n".format(
", ".join(valid_values[:max_suggested]),
",..." if len(valid_values) > max_suggested else "",
)
)
while True:
try:
input_value = input(prompt) if prompt else input()
if valid_values is not None and input_value not in valid_values:
wrong_input_message()
continue
except ValueError:
wrong_input_message()
continue
return input_value
# noinspection PyPep8Naming
class HashableNDArray:
"""Hashable wrapper for ndarray objects.
Instances of ndarray are not hashable, meaning they cannot be added to
sets, nor used as keys in dictionaries. This is by design - ndarray
objects are mutable, and therefore cannot reliably implement the
__hash__() method.
The hashable class allows a way around this limitation. It implements
the required methods for hashable objects in terms of an encapsulated
ndarray object. This can be either a copied instance (which is safer)
or the original object (which requires the user to be careful enough
not to modify it)."""
def __init__(self, wrapped, tight=False) -> None:
"""Creates a new hashable object encapsulating an ndarray.
wrapped
The wrapped ndarray.
tight
Optional. If True, a copy of the input ndarray is created.
Defaults to False.
"""
self.__tight = tight
self.__wrapped = np.array(wrapped) if tight else wrapped
self.__hash = int(sha1(wrapped.view()).hexdigest(), 16)
def __eq__(self, other) -> bool:
return np.all(self.__wrapped == other.__wrapped)
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import nn
import nn.functional as F
n_features = 28 * 28
n_classes = 10
n_epochs = 10
bs = 1000
lr = 1e-3
lengths = (n_features, 512, n_classes)
class Model(nn.Module):
    # TODO Design the classifier.
    # A minimal MLP sketch for lengths = (28*28, 512, 10). It assumes the
    # local `nn` package provides `nn.Linear(in_features, out_features)` and
    # `nn.ReLU()` modules with `forward` methods -- adapt these names to
    # whatever this framework actually exposes.
    def __init__(self, lengths):
        self.lengths = lengths
        self.fc1 = nn.Linear(lengths[0], lengths[1])
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(lengths[1], lengths[2])
    def forward(self, x: np.ndarray) -> np.ndarray:
        h = self.fc1.forward(x)      # (bs, 784) -> (bs, 512)
        h = self.relu.forward(h)
        return self.fc2.forward(h)   # (bs, 512) -> (bs, 10) class scores
        # End of todo
def load_mnist(mode='train', n_samples=None):
images = './train-images-idx3-ubyte' if mode == 'train' else './t10k-images-idx3-ubyte'
labels = './train-labels-idx1-ubyte' if mode == 'train' else './t10k-labels-idx1-ubyte'
length = 60000 if mode == 'train' else 10000
X = np.fromfile(open(images), np.uint8)[16:].reshape((length, 28, 28)).astype(np.int32)
y = np.fromfile(open(labels), np.uint8)[8:].reshape((length)).astype(np.int32)
return (X[:n_samples].reshape(n_samples, -1), y[:n_samples]) if n_samples is not None else (X.reshape(length, -1), y)
def vis_demo(model):
X, y = load_mnist('test', 20)
probs = model.forward(X)
preds = np.argmax(probs, axis=1)
fig = plt.subplots(nrows=4, ncols=5, sharex='all', sharey='all')[1].flatten()
for i in range(20):
img = X[i].reshape(28, 28)
fig[i].set_title(preds[i])
fig[i].imshow(img, cmap='Greys', interpolation='nearest')
fig[0].set_xticks([])
fig[0].set_yticks([])
plt.tight_layout()
plt.savefig("vis.png")
plt.show()
def main():
trainloader = nn.data.DataLoader(load_mnist('train'), batch=bs)
testloader = nn.data.DataLoader(load_mnist('test'))
model = Model(lengths)
optimizer = nn.optim.SGD(model, lr=lr, momentum=0.9)
criterion = F.CrossEntropyLoss(n_classes=n_classes)
for i in range(n_epochs):
bar = tqdm(trainloader, total=6e4 / bs)
bar.set_description(f'epoch {i:2}')
for X, y in bar:
probs = model.forward(X)
loss = criterion(probs, y)
model.backward(loss.backward())
optimizer.step()
preds = np.argmax(probs, axis=1)