import numpy as np
from scipy import linalg as ln
def chained_integrator_dynamics(dt=0.1, n=2, decay=1, amplification=1, fullB=False):
    ''' Forward Euler discretization of
    dx_i/dt = x_{i+1}; dx_n/dt = u
    with added decay of the states or amplification
    of the integration terms '''
if hasattr(decay, "__len__"):
assert len(decay) == n, 'incorrect number of decay coefficients'
else:
decay = decay * np.ones(n)
if hasattr(amplification, "__len__"):
assert len(amplification) == n, 'incorrect number of amplification coefficients'
else:
        amplification = amplification * np.ones(n)
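    # --- hypothetical completion (the source is truncated here). A minimal
    # sketch, assuming the continuous-time dynamics
    #   dx_i/dt = amplification_i * x_{i+1} - decay_i * x_i,
    #   dx_n/dt = u - decay_n * x_n,
    # discretized with forward Euler as A = I + dt * A_c, B = dt * B_c:
    A_c = np.diag(-decay) + np.diag(amplification[:n - 1], k=1)
    A = np.eye(n) + dt * A_c
    if fullB:
        B = dt * np.eye(n)            # input on every state
    else:
        B = dt * np.eye(n)[:, [-1]]   # input enters only the last integrator
    return A, B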
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import glob
import sys
import os
import json
import logging
import warnings
import datetime
import io
from string import Template
from shutil import copyfile
warnings.filterwarnings("ignore")
import numpy as np
import skimage
import skimage.io
import skimage.exposure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.colors as color
import sct_utils as sct
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.reports.slice as qcslice
from spinalcordtoolbox import __sct_dir__
logger = logging.getLogger(__name__)
class QcImage(object):
"""
Class used to create a .png file from a 2d image produced by the class "Slice"
"""
_labels_regions = {'PONS': 50, 'MO': 51,
'C1': 1, 'C2': 2, 'C3': 3, 'C4': 4, 'C5': 5, 'C6': 6, 'C7': 7,
'T1': 8, 'T2': 9, 'T3': 10, 'T4': 11, 'T5': 12, 'T6': 13, 'T7': 14, 'T8': 15, 'T9': 16,
'T10': 17, 'T11': 18, 'T12': 19,
'L1': 20, 'L2': 21, 'L3': 22, 'L4': 23, 'L5': 24,
'S1': 25, 'S2': 26, 'S3': 27, 'S4': 28, 'S5': 29,
'Co': 30}
_color_bin_green = ["#ffffff", "#00ff00"]
_color_bin_red = ["#ffffff", "#ff0000"]
_labels_color = ["#04663c", "#ff0000", "#50ff30",
"#ed1339", "#ffffff", "#e002e8",
"#ffee00", "#00c7ff", "#199f26",
"#563691", "#848545", "#ce2fe1",
"#2142a6", "#3edd76", "#c4c253",
"#e8618a", "#3128a3", "#1a41db",
"#939e41", "#3bec02", "#1c2c79",
"#18584e", "#b49992", "#e9e73a",
"#3b0e6e", "#6e856f", "#637394",
"#36e05b", "#530a1f", "#8179c4",
"#e1320c", "#52a4df", "#000ab5",
"#4a4242", "#0b53a5", "#b49c19",
"#50e7a9", "#bf5a42", "#fa8d8e",
"#83839a", "#320fef", "#82ffbf",
"#360ee7", "#551960", "#11371e",
"#e900c3", "#a21360", "#58a601",
"#811c90", "#235acf", "#49395d",
"#9f89b0", "#e08e08", "#3d2b54",
"#7d0434", "#fb1849", "#14aab4",
"#a22abd", "#d58240", "#ac2aff"]
# _seg_colormap = plt.cm.autumn
def __init__(self, qc_report, interpolation, action_list, stretch_contrast=True,
stretch_contrast_method='contrast_stretching', angle_line=None):
"""
Parameters
----------
qc_report : QcReport
The QC report object
interpolation : str
Type of interpolation used in matplotlib
action_list : list of functions
            List of functions, each generating a specific type of image
        stretch_contrast : bool
            Adjust the image to improve contrast
        stretch_contrast_method : {'contrast_stretching', 'equalized'}
            Method for stretching the contrast
        angle_line : [float]
            See generate_qc()
"""
self.qc_report = qc_report
self.interpolation = interpolation
self.action_list = action_list
self._stretch_contrast = stretch_contrast
self._stretch_contrast_method = stretch_contrast_method
self._angle_line = angle_line
self._centermass = None # center of mass returned by slice.Axial.get_center()
"""
        action_list contains the list of images that have to be generated.
It can be seen as "figures" of matplotlib to be shown
Ex: if 'colorbar' is in the list, the process will generate a color bar in the "img" folder
"""
def line_angle(self, mask, ax):
"""Create figure with line superposed over each mosaic square. The line has an angle encoded in the
argument self._angle_line"""
        angles = np.full(len(self._centermass), np.nan)
angles[0:len(self._angle_line)] = self._angle_line
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
for nslice, center_mosaic in enumerate(self._centermass):
if np.isnan(angles[nslice]):
pass
else:
x0, y0 = center_mosaic[0], center_mosaic[1]
angle = angles[nslice]
if not (-np.pi <= angle <= np.pi):
                    raise Exception("angle provided for angle_line is not in the range [-pi, pi]")
x_min, y_min = x0 - 10, y0 - 10
x_max, y_max = x0 + 10, y0 + 10
if -np.pi/4 < angle <= np.pi/4 or -np.pi <= angle <= -3*np.pi/4 or 3*np.pi/4 < angle <= np.pi:
y1 = y_min
y2 = y_max
x1 = (y_min - y0) * np.tan(angle) + x0
x2 = (y_max - y0) * np.tan(angle) + x0
else:
x1 = x_min
x2 = x_max
y1 = y0 + (x_min - x0) / np.tan(angle)
y2 = y0 + (x_max - x0) / np.tan(angle)
ax.plot([x1, x2], [y1, y2], '-', color='red', linewidth=0.7)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def listed_seg(self, mask, ax):
"""Create figure with red segmentation. Common scenario."""
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=color.ListedColormap(self._color_bin_red),
norm=color.Normalize(vmin=0, vmax=1),
interpolation=self.interpolation,
alpha=1,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def template(self, mask, ax):
"""Show template statistical atlas"""
values = mask
values[values < 0.5] = 0
color_white = color.colorConverter.to_rgba('white', alpha=0.0)
color_blue = color.colorConverter.to_rgba('blue', alpha=0.7)
color_cyan = color.colorConverter.to_rgba('cyan', alpha=0.8)
cmap = color.LinearSegmentedColormap.from_list('cmap_atlas',
[color_white, color_blue, color_cyan], N=256)
ax.imshow(values,
cmap=cmap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def no_seg_seg(self, mask, ax):
"""Create figure with image overlay. Notably used by sct_registration_to_template"""
ax.imshow(mask, cmap='gray', interpolation=self.interpolation, aspect=self.aspect_mask)
self._add_orientation_label(ax)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def sequential_seg(self, mask, ax):
values = np.ma.masked_equal(np.rint(mask), 0)
ax.imshow(values,
cmap=self._seg_colormap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label_vertebrae(self, mask, ax):
"""Draw vertebrae areas, then add text showing the vertebrae names"""
from matplotlib import colors
import scipy.ndimage
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=colors.ListedColormap(self._labels_color),
norm=colors.Normalize(vmin=0, vmax=len(self._labels_color)),
interpolation=self.interpolation,
alpha=1,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
a = [0.0]
data = mask
for index, val in np.ndenumerate(data):
if val not in a:
a.append(val)
index = int(val)
if index in self._labels_regions.values():
color = self._labels_color[index]
y, x = scipy.ndimage.measurements.center_of_mass(np.where(data == val, data, 0))
# Draw text with a shadow
x += 10
label = list(self._labels_regions.keys())[list(self._labels_regions.values()).index(index)]
ax.text(x, y, label, color='black', clip_on=True)
x -= 0.5
y -= 0.5
ax.text(x, y, label, color=color, clip_on=True)
def highlight_pmj(self, mask, ax):
"""Hook to show a rectangle where PMJ is on the slice"""
        y, x = np.where(mask == 50)
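        # --- hypothetical continuation (the source is truncated here). A
        # minimal sketch matching the docstring: outline the voxel labeled 50
        # (the PMJ) with a rectangle, assuming a hypothetical 10-px half-size.
        import matplotlib.patches as patches
        if len(x) > 0:
            ax.add_patch(patches.Rectangle((x[0] - 10, y[0] - 10), 20, 20,
                                           edgecolor='lime', facecolor='none',
                                           linewidth=1))
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)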
import os, sys
from math import sqrt, copysign
import pandas as pd
import numpy as np
import helpers as nhp
from helpers import rotmat_dict, rotmats
from LatticeModel import LatticeModel
from cached_property import cached_property
import random
from itertools import combinations
import plotly as py
import plotly.graph_objs as go
from Bio.PDB import PDBParser
from Bio.PDB.QCPSuperimposer import QCPSuperimposer
pdb_parser = PDBParser()
imposer = QCPSuperimposer()
neighbor_mods = np.array([
[2, 2, 2],[-2, -2, -2],
[-2, 2, 2],[2, -2, 2],[2, 2, -2],
[-2, -2, 2],[-2, 2, -2],[2, -2, -2]
])
cubic_neighbor_mods = np.array([
[0,0,4], [0,4,0], [4,0,0],
[0,0,-4],[0,-4,0], [-4,0,0],
])
neighbor_mods_d2 = np.unique(np.vstack([nm + neighbor_mods for nm in neighbor_mods]), axis=0)
neighbor_mods2 = np.vstack((neighbor_mods, neighbor_mods_d2))
mod2mod_dict = {nmi: np.argwhere(nhp.inNd(neighbor_mods2, nm1 * 2))[0,0] for nmi, nm1 in enumerate(neighbor_mods)}
tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in neighbor_mods]
# test: allow cubic paths for tags
cubic_tag_mods_single = [np.cumsum(np.tile(mod, (10,1)), axis=0) for mod in cubic_neighbor_mods]
tag_mods_single.extend(cubic_tag_mods_single)
tag_mods_bulk = []
for tm in tag_mods_single:
tmb = np.unique(np.vstack([tms + neighbor_mods2 for tmi, tms in enumerate(tm) if tmi > 1]), axis=0)
tmb = tmb[np.invert(nhp.inNd(tmb, tm))]
tag_mods_bulk.append(tmb)
tag_mods = list(zip(tag_mods_single, tag_mods_bulk))
quad_neighbor_mods_abs = np.array([
[0, 0, 4],
[0, 4, 0],
[4, 0, 0]
])
helix_array = np.array([[0, 0, 0],
[2, -2, 2],
[4, 0, 4],
[2, 2, 6],
[0, 0, 8]])
rotated_helix_array_list = [np.matmul(helix_array, rot) for rot in rotmats]
# mirror_dims = list(combinations([0,1,2], 2)) + [tuple([i]) for i in range(3)] + [(0, 1, 2)]
# mirrored_rotated_helix_array_list = [rhm for rhm in rotated_helix_array_list]
# helix_mod = np.array([[2, -2, 2],
# [2, 2, 2],
# [-2, 2, 2],
# [-2, -2, 2]])
# helix with equidistant neighbors
helix_v_truth = np.array([[6, 2, -2, 2],
[-6, -2, 2, -2]])
helix_h_truth = np.array([0, 0, 8])
# helix with 1 quad face transition
# helix_h_truth = np.array([0, 0, 6])
#
# helix_v_truth = np.array([[6, 0, -2, 2],
# [-6, 0, 2, -2]])
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
from: https://stackoverflow.com/questions/45142959/calculate-rotation-matrix-to-align-two-vectors-in-3d-space
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
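# Usage sketch: R = rotation_matrix_from_vectors(np.array([1., 0., 0.]),
# np.array([0., 0., 1.])) gives R @ [1, 0, 0] == [0, 0, 1] up to rounding.
# Caveat: the Rodrigues-style formula above divides by s**2, so parallel or
# anti-parallel inputs (s == 0) must be special-cased by the caller.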
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Lattice(LatticeModel):
"""Class containing all that pertains to a particular type of lattice (initialization, allowed moves etc.)
lattice type: body-centered cubic (bcc)
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pdb_id = kwargs.get('pdb_id', 'unknown')
self.experimental_mode = kwargs['experimental_mode']
self.no_regularization = kwargs.get('no_regularization', False)
self.ca_dist = 3.8 # actual CA distance
self.lat_dist = sqrt((0.5 * self.ca_dist) ** 2 / 3) # distance of lattice edge
self.linker_dist = 21 # Distance tagged CA to dye
self.linker_dist_lat = sqrt(self.linker_dist ** 2 / 3)
self.n1_dist = 1.48 # estimate of distance N to CA
self.pairs_mat = kwargs['pairs_mat']
self.ss_df = kwargs['secondary_structure']
# self.sheet_series = nhp.list_sheet_series(self.ss_sequence)
self.coords = kwargs.get('coords', None)
self.prev_coords = self.coords.copy()
self.branch_rotation_idx_list = list(range(len(rotmats)))
self.cm_coords = kwargs.get('cm_coords', None)
self.finetune_structure = kwargs.get('finetune_structure', False)
def coords_are_valid(self):
"""
For testing purposes!
"""
for i, c in enumerate(self.coords[:-1]):
if not np.all(np.abs(self.coords[i+1] - c) == 2): return False
return True
@property
def cm_coords(self):
return self._cm_coords
@cm_coords.setter
def cm_coords(self, coords):
"""
Translate cm coords to unit lattice
"""
if coords is None:
self._cm_coords = None
return
self._cm_coords = (coords - coords[0]) / self.lat_dist
@cached_property
def sheet_block_dict(self):
out_dict = {}
cur_block_idx = 0
in_block = False
for si, ss in enumerate(self.ss_sequence):
if ss == 'S':
if not in_block:
cur_block_idx += 1
in_block = True
out_dict[si] = cur_block_idx
else:
in_block = False
return out_dict
@property
def ss_df(self):
return self._ss_df
@ss_df.setter
def ss_df(self, df):
        df.loc[self.tagged_resi, :] = 0, 4, 4
df.loc[:, 'L'] = 0
df[df > 0] = 0
self._ss_df = df
# --- mutations ---
def apply_n_steps(self, n):
global_fun_list = [
# self.apply_crankshaft_move,
self.apply_branch_rotation,
self.apply_corner_flip,
# self.apply_pull_move # screws up helices, can't get it right
]
for _ in range(n):
random.shuffle(global_fun_list)
if global_fun_list[0](): pass
elif global_fun_list[1](): pass
# elif global_fun_list[2](): pass
# elif global_fun_list[3](): pass
else: return False
self.set_hash_list()
self.__dict__.pop('e_matrix', None)
return True
def check_helicity(self):
# test: see if helices are still in place
for ci, ss in self.ss_df.iterrows():
if ss.H >= 0: continue
helix_candidate = self.coords[ci:ci + 5] - self.coords[ci]
hel_dists = [np.linalg.norm(helix_candidate - hel) for hel in rotated_helix_array_list]
if not np.any(np.array(hel_dists) == 0):
return ci
@property
def branch_rotation_idx_list(self):
random.shuffle(self._branch_rotation_idx_list)
return self._branch_rotation_idx_list
@branch_rotation_idx_list.setter
def branch_rotation_idx_list(self, bri_list):
self._branch_rotation_idx_list = bri_list
def apply_branch_rotation(self):
mutations = list(range(-3, 4))
mutations.remove(0)
random.shuffle(mutations)
idx_list = list(range(self.seq_length - 1))
idx_list = np.array(idx_list)[self.ss_sequence[:-1] != 'H']
random.shuffle(idx_list) # randomize positions to check
for ci in idx_list: # omit last position, where rotation does not make sense
for mi in self.branch_rotation_idx_list:
candidate = self.branch_rotation(self._coords[ci + 1:, :], self._coords[ci, :], mi)
if not np.any(nhp.inNd(candidate, self.coords[:ci, :])):
self._coords[ci + 1:, :] = candidate
return True
# candidate[ci + 1:, :] = self.branch_rotation(self._coords[ci + 1:, :], self._coords[ci, :], mut)
# if self.is_valid_candidate(candidate):
# self.coords = candidate
# return True
return False
def apply_pull_move(self):
direction = [-1, 1]
random.shuffle(direction)
idx_list = list(range(2, self.seq_length - 2))
idx_list = np.array(idx_list)[self.ss_sequence[2:-2] != 'H']
random.shuffle(idx_list) # randomize positions to check
candidate_found = False
for ri in idx_list:
for dir in direction:
if self.ss_sequence[ri + dir] == 'H' or self.ss_sequence[ri + dir * 2] == 'H': continue
# Candidates for first moved atom should be
l0_candidates = self.coords[ri + dir] + neighbor_mods_d2 # reachable from their old pos by 2 steps
l0_candidates = l0_candidates[nhp.inNd(l0_candidates, self.coords[ri] + neighbor_mods)] # adjacent to non-moved atom
l0_candidates = l0_candidates[np.invert(nhp.inNd(l0_candidates, self.coords))] # unoccupied
if not len(l0_candidates): continue
np.random.shuffle(l0_candidates)
for l0 in l0_candidates:
# Candidates for second moved atom should be...
l1_candidates = self.coords[ri + dir * 2] + neighbor_mods_d2 # reachable from their old pos by 2 steps
l1_candidates = l1_candidates[nhp.inNd(l1_candidates, l0 + neighbor_mods)] # adjacent to new l0 coord
if not len(l1_candidates): continue
l1_candidates = l1_candidates[np.invert(self.inNd(l1_candidates))] # unoccupied
if not len(l1_candidates): continue
l0_idx = ri + dir
d2_pos = l1_candidates[np.random.randint(len(l1_candidates))]
# Get position for third moved atom: between new d2 position and old d2 position
d1_candidates = d2_pos + neighbor_mods
d1_pos = d1_candidates[nhp.inNd(d1_candidates, self.coords[ri + dir * 2] + neighbor_mods)][0]
if self.inNd(d1_pos)[0]: continue
self._coords[ri + dir] = l0
change_idx = np.arange(ri + 2, self.seq_length) if dir == 1 else np.arange(ri-1)[::-1]
candidate_found = True
break
if candidate_found: break
if candidate_found: break
if not candidate_found:
return False
# Fill in positions
prev_c = l0_idx
first_H = True
for c in change_idx:
if self.ss_sequence[c] != 'H' and np.all(np.abs(self.coords[c] - self.coords[prev_c]) == 2):
break
if self.ss_sequence[c] == 'H':
if first_H:
helix_transl = self.coords[c] - d2_pos
self.coords[c] = d2_pos
first_H = False
else:
d2_pos = d1_pos
d1_pos = self.coords[c-1]
self.coords[c] = self.coords[c] + helix_transl
continue
else:
first_H = True
old_coord = self.coords[c].copy()
self.coords[c] = d2_pos
d2_pos = d1_pos
d1_pos = old_coord
prev_c = c
return True
def apply_corner_flip(self):
# Find idx of corners
diff1 = self.coords[1:] - self.coords[:-1]
corner_bool = np.invert(np.all(np.equal(diff1[:-1], diff1[1:]), axis=1))
# corner_bool = np.count_nonzero((self._coords[2:, :] - self._coords[:-2, :]), axis=1) == 1
corner_bool[self.ss_sequence[1:-1] == 'H'] = False
if not np.any(corner_bool): return False
corner_idx = np.squeeze(np.argwhere(corner_bool), axis=1) + 1 # +1 as idx was of left neighbor
np.random.shuffle(corner_idx)
# Generate & check candidates
for ci in corner_idx:
candidate = self.corner_flip(self._coords[ci - 1, :3],
self._coords[ci, :3],
self._coords[ci + 1, :3])
if not self.inNd(candidate)[0]:
# if not nhp.inNd(candidate, self.coords)[0]:
self._coords[ci, :] = candidate
return True
return False
def apply_crankshaft_move(self):
# temporarily shutdown: not sure how this contributes for BCC
diff_4pos = self._coords[3:, :] - self._coords[:-3, :] # Diff res with two spaces
crank_bool = np.all(np.absolute(diff_4pos) == 2, axis=1)
        # crank_bool = np.sum(np.absolute(diff_4pos), axis=1) == 2  # if diff is 2 for that position, it must be a u-loop
if not np.any(crank_bool): return False
crank_idx = np.squeeze(np.argwhere(crank_bool), axis=1) # index of left-most position of the four points!
np.random.shuffle(crank_idx)
# Generate & check candidates
for ci in crank_idx:
crank_idx, crank_dir = abs(ci), copysign(1, ci)
c0, c1, c2, c3 = self.coords[ci:ci + 4, :]
c1_candidates = c0 + neighbor_mods
c2_candidates = c3 + neighbor_mods
c1_candidates = c1_candidates[np.invert(self.inNd(c1_candidates)), :]
c2_candidates = c2_candidates[np.invert(self.inNd(c2_candidates)), :]
if not len(c1_candidates) or not len(c2_candidates): continue
np.random.shuffle(c1_candidates)
for c1_candidate in c1_candidates:
c2_idx = nhp.inNd(c2_candidates, c1_candidate + neighbor_mods)
if np.any(c2_idx):
c2_candidates = c2_candidates[c2_idx]
np.random.shuffle(c2_candidates)
self._coords[ci + 1:ci + 3, :] = np.vstack((c1_candidate, c2_candidates[0]))
return True
return False
def set_hash_list(self):
        self.hash_list = set([hash(cc.tobytes()) for cc in self.coords])
def inNd(self, c):
if c.ndim == 1:
c = np.expand_dims(c, 0)
        c_hash_list = [hash(cc.tobytes()) for cc in c]
return [ch in self.hash_list for ch in c_hash_list]
@staticmethod
def branch_rotation(c, pivot, dim):
"""
:param c: coordinates to change
:param pivot: point around which to rotate
:param dim: signed dimension in which to perform rotation (1, 2 or 3), pos for fwd, neg for rev
:return: mutated coords
"""
return np.dot(rotmats[dim], (c - pivot).T).T + pivot
@staticmethod
def corner_flip(c1, c2, c3):
return c2 + ((c1 + c3) - 2 * c2)
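        # Note: the expression simplifies to c1 + c3 - c2, i.e. c2 reflected
        # through the midpoint of its two lattice neighbours.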
# --- stats and derived properties ----
def get_pdb_coords(self, intermediate=False, conect_only=False):
"""
Return coordinates in pdb format, as string
:param intermediate: return without CONECT cards, required to create pdbs with multiple models
:param conect_only: only return the CONECT cards
:return:
"""
coords_ca = self.coords - self.coords[0] # translate to 0,0,0
coords_ca = coords_ca * self.lat_dist # unit distances to real distances
cn = (self.coords[1] - self.coords[0]) * -1 * sqrt(self.n1_dist ** 2 / 3) # stick on N1 in opposite direction of chain
cn_str = nhp.pdb_coord(cn)
# resn = nhp.aa_dict[self.aa_sequence[0]]
resn = nhp.aa_dict.get(self.aa_sequence[0], self.aa_sequence[0])
txt = f'HETATM 1 N {resn} A 1 {cn_str} 1.00 1.00 N\n'
# Add CA coordinates
an = 2 # atom number, start at 2 for first N
an_alpha = 1 # tracker of alpha carbon atom number, just for CONECT record
resi = 1
conect = ""
tag_coord_dict = {0: []} # Fill in tag at pos 0, in case no other residues are tagged
for ci in self.tagged_resi:
if ci == 0: continue
tag_coord_dict[ci], tag_coord_dict[0] = self.get_dye_coords(ci, 0)
for ci, ca in enumerate(coords_ca):
# --- add alpha carbon CA ---
# resn_str = nhp.aa_dict[self.aa_sequence[ci]]
resn_str = nhp.aa_dict.get(self.aa_sequence[ci], self.aa_sequence[ci])
resi_str = str(resi).rjust(4)
ca_str = nhp.pdb_coord(ca)
txt += f'HETATM{str(an).rjust(5)} CA {resn_str} A{resi_str} {ca_str} 1.00 1.00 C\n'
conect += f"CONECT{str(an_alpha).rjust(5)}{str(an).rjust(5)}\n"
an_alpha = an
an += 1
if ci in self.tagged_resi: # Add tag residue
if not len(tag_coord_dict[ci]): continue
dye_coord = tag_coord_dict[ci]
tc_str = nhp.pdb_coord(dye_coord[0])
txt += f'HETATM{str(an).rjust(5)} CB {resn_str} A{resi_str} {tc_str} 1.00 1.00 C\n'
conect += f"CONECT{str(an_alpha).rjust(5)}{str(an).rjust(5)}\n"
an += 1
resi += 1
# Add terminus
an_str = str(an).rjust(5)
resn_str = nhp.aa_dict.get(self.aa_sequence[-1], self.aa_sequence[-1])
resi_str = str(resi - 1).rjust(4) # - 1: still on same residue as last CA
txt += f'TER {an_str} {resn_str} A{resi_str}\n'
if conect_only:
return conect
elif intermediate:
return txt
return txt + conect
def plot_structure(self, fn=None, auto_open=True):
cols = [nhp.aa_to_hp_dict[aa] if resi not in self.tagged_resi else 'purple'
for resi, aa in enumerate(self.aa_sequence)]
trace_bb = go.Scatter3d(x=self.coords[:, 0],
y=self.coords[:, 1],
z=self.coords[:, 2],
line=dict(color=cols, width=20),
# marker=dict(size=5)
)
trace_list = [trace_bb]
pmin = np.min(self.coords)
pmax = np.max(self.coords)
layout = go.Layout(scene=dict(
xaxis=dict(range=[pmin, pmax]),
yaxis=dict(range=[pmin, pmax]),
zaxis=dict(range=[pmin, pmax]),
aspectmode='cube'
)
)
fig = go.Figure(data=trace_list, layout=layout)
if fn is None:
py.offline.plot(fig, auto_open=auto_open)
else:
py.offline.plot(fig, filename=fn, auto_open=auto_open)
def get_neighbors(self, c):
return neighbor_mods + c
# --- setters & properties ---
def get_dye_coords(self, ci, partner_idx, expected_value=None):
tag_obstructions_list = self.get_tag_obstructions(ci)
unobstructed_tag_mods = [tm[0] / np.linalg.norm(tm[0]) for ti, tm in enumerate(tag_mods_single) if tag_obstructions_list[ti] == 0]
if not len(unobstructed_tag_mods): return [], []
ptc = (self.coords[partner_idx] - self.coords[ci])
ptc = ptc / np.linalg.norm(ptc)
tag_ca_dist = np.linalg.norm(self.coords[partner_idx] - self.coords[ci]) * self.lat_dist
# Filter tag positions on angle
angle_limit = 70 if tag_ca_dist <= 20 else 0
angles = [nhp.get_angle(ptc, ut) for ut in unobstructed_tag_mods]
ptc_angles_idx = [it for it, ut in enumerate(unobstructed_tag_mods) if angles[it] > angle_limit]
if not len(ptc_angles_idx):
ptc_angles_idx = [np.argmax(angles)]
# Filter tag positions on dihedral
# dist_best = np.Inf
largest_dh = (-np.Inf, ())
tuple_list = []
tag0_obstructions_list = self.get_tag_obstructions(partner_idx)
unobstructed_tag0_mods = [tm[0] / np.linalg.norm(tm[0]) for ti, tm in enumerate(tag_mods_single) if
tag0_obstructions_list[ti] == 0]
if not len(unobstructed_tag0_mods): return [], []
for ti in ptc_angles_idx:
for t0 in unobstructed_tag0_mods:
dihedral = nhp.get_abs_dihedral(self.coords[ci], self.coords[0],
self.coords[ci] + unobstructed_tag_mods[ti],
self.coords[0] + t0)
if dihedral > angle_limit:
tuple_list.append((unobstructed_tag_mods[ti], t0))
if dihedral > largest_dh[0]:
largest_dh = (dihedral, (unobstructed_tag_mods[ti], t0))
# dist = np.abs(dihedral - angles[ti])
# if dist < dist_best:
# tuple_best = [unobstructed_tag_mods[ti], t0]
# dist_best = dist
# if dist_best > 3: return [], []
if len(tuple_list):
tuple_best = random.choice(tuple_list)
else:
tuple_best = largest_dh[1]
return [(self.coords[ci] - self.coords[0]) * self.lat_dist + tuple_best[0] * self.linker_dist], \
[tuple_best[1] * self.linker_dist]
@property
def dist_fingerprint(self):
if len(self.tagged_resi) < 2: return []
fp = {}
for fi in self.tagged_resi:
if fi == 0: continue
dye_coords, dye_coords_0 = self.get_dye_coords(fi, 0)
if not len(dye_coords): continue
cur_fp = []
for d0 in dye_coords_0:
cur_fp.extend([np.linalg.norm(d0 - dc) for dc in dye_coords])
tt = self.tagged_resi_dict[fi] # tag type
if tt in fp:
fp[tt].append(np.mean(cur_fp))
else:
fp[tt] = [np.mean(cur_fp)]
# fp.append(np.mean(cur_fp))
return fp
@property
def base_energy(self):
return np.sum(self.individual_energies)
@property
def individual_energies(self):
"""
Energy cost function
"""
emat, e_wat, e_dsb, e_tag, e_reg = self.e_matrix
e_aa = emat[:-4, :].sum().sum() / 2
e_ss = emat[-4:-1, :].sum().sum()
return e_aa, e_ss, e_wat, e_dsb, e_tag, e_reg
def beta_sheet_bend_rule(self, c):
# return np.sum(np.abs(c[2] - c[0]) == 4) > 1 # true if angles of 109.5 or 180 deg
# return np.sum(np.abs(c[2] - c[0]) == 4) == 3 # true if angles 180 deg
if len(c) == 2:
return True
return np.sum(np.abs(c[2] - c[0]) == 4) == 2 # true if angles 109.5 deg
def beta_sheet_parallel_rule(self, neighbors, adjacents):
parallel_dist = neighbors - adjacents
inverse_dist = neighbors[::-1] - adjacents
parallel_check = nhp.inNd(np.abs(parallel_dist[0]), quad_neighbor_mods_abs)[0] and len(np.unique(parallel_dist, axis=0)) == 1
inverse_check = nhp.inNd(np.abs(inverse_dist[0]), quad_neighbor_mods_abs)[0] and len(np.unique(inverse_dist, axis=0)) == 1
# parallel_check = np.all(nhp.inNd(np.abs(neighbors - adjacents), quad_neighbor_mods_abs))
# inverse_check = np.all(nhp.inNd(np.abs(neighbors[::-1] - adjacents), quad_neighbor_mods_abs))
if parallel_check or inverse_check:
return True
def get_tag_obstructions(self, ci):
tag_obstructions_list = []
for tm_bb, tm_bulk in tag_mods:
bb_clashes = np.array(self.inNd(self.coords[ci] + tm_bb))
bulk_coords = self.coords[ci] + tm_bulk
bulk_clashes = np.array(self.inNd(bulk_coords))
bulk_clashes[nhp.inNd(bulk_coords, self.coords[max(0, ci - 1):min(ci + 2, self.seq_length)])] = False
tag_obstructions_list.append(np.sum(bb_clashes) + np.sum(bulk_clashes))
# clash_bool = np.array(self.inNd(cur_coords))
# clash_bool[nhp.inNd(cur_coords, self.coords[max(0, ci-1):min(ci+2, self.seq_length)])] = False
# tag_obstructions_list.append(np.sum(clash_bool))
# if ci == 0 or ci == self.seq_length - 1:
# tag_obstructions_list = [to - 2 for to in tag_obstructions_list]
# else:
# tag_obstructions_list = [to - 3 for to in tag_obstructions_list]
return tag_obstructions_list
def get_contacts(self, ci):
"""
Find contacting indices:
- maximum 6
- not including direct neighbors and self
- First counting direct adjacents, then lv2 adjacents
- not counting lv2 adjacents, obscured by lv1 adjacents
"""
# Find which lv1 adajacents are matched
contact_idx1 = np.argwhere(self.inNd(self.coords[ci] + neighbor_mods)).squeeze(-1)
# remove lv2 adjacents behind occupied lv1 adjacent vertices
idx_to_remove = [mod2mod_dict[cidx1] for cidx1 in contact_idx1]
nm2_bool = np.ones(len(neighbor_mods2), dtype=bool)
nm2_bool[idx_to_remove] = False
contact_idx2 = np.argwhere(nhp.inNd(self.coords, self.coords[ci] + neighbor_mods2[nm2_bool])).squeeze(-1)
# exclude indices of residue itself and up to 2 neighbors
contact_idx2 = contact_idx2[np.logical_or(contact_idx2 > ci + 2, contact_idx2 < ci - 2)]
# ensure no more than 6 contacts
if len(contact_idx2) > 6:
# Pick 6 closest (automatically ensures lv1 adjacents are picked)
contact_idx2 = contact_idx2[np.argsort([np.linalg.norm(self.coords[ci] - self.coords[ci2]) for ci2 in contact_idx2])][:6]
return contact_idx2
@cached_property
def e_matrix(self):
"""
Energy cost function
"""
# if self.experimental_mode == 13: # only attempt to minimize difference expected and modeled tag distance
# tag0_coord = self.get_dye_coords(0)
# tag_coord = self.get_dye_coords(self.tagged_resi[1])
# if not len(tag0_coord) or not len(tag_coord):
# return np.zeros((self.seq_length+3, self.seq_length)), 0, 0, 1E10
# tag_dist = np.linalg.norm(tag0_coord[0] - tag_coord[0])
# e_tag = np.abs(exp_df.loc[self.pdb_id, 'distance'] - tag_dist) * 100
# return np.zeros((self.seq_length+3, self.seq_length)), 0, 0, e_tag
seqi_list = np.arange(self.seq_length)
e_wat = 0
e_dsb = 0
ss_multiplier = 25
double_s_idx = []
e_aa_array = np.zeros((len(seqi_list), len(seqi_list)), dtype=float)
ss_array = np.tile('L', len(seqi_list))
tag_array = np.zeros(len(seqi_list), dtype=float)
outer_limits = np.vstack((self.coords.max(axis=0), self.coords.min(axis=0)))
for ci, c in enumerate(self.coords):
# If tagged, residue can't contribute to other terms and must be on outer border (heavy penalty)
if ci in self.tagged_resi:
tag_obstructions_list = self.get_tag_obstructions(ci)
tag_array[ci] += 100 * min(tag_obstructions_list)
continue
# alpha helix h-bond contribution
if ci < self.seq_length - 4:
helix_candidate = self.coords[ci:ci + 5] - self.coords[ci]
hel_dists = [np.linalg.norm(helix_candidate - hel) for hel in rotated_helix_array_list]
if np.any(np.array(hel_dists) == 0):
ss_array[ci] = 'H'
c_resn = self.aa_sequence[ci]
contact_idx = self.get_contacts(ci)
for cci in contact_idx:
# res-res contribution
e_aa_array[ci, cci] = self.pairs_mat.loc[c_resn, self.aa_sequence[cci]]
if c_resn == 'C' and self.aa_sequence[cci] == 'C':
e_dsb -= 50 # disulfide bridge bonus
# water contribution
e_wat += (6 - len(contact_idx)) * self.pairs_mat.loc[c_resn, 'HOH']
# beta-strand H-bonds
if self.ss_sequence[ci] != 'S': continue # Cannot form sheet with non-sheet residue
nb_sheet_hbonds = 0
confirmed_sheet_neighbor = None
for idx in contact_idx:
if self.ss_sequence[idx] != 'S': continue # Cannot form sheet with non-sheet residue
if idx in self.sheet_block_dict and ci in self.sheet_block_dict:
if self.sheet_block_dict[idx] == self.sheet_block_dict[ci]: continue # Cannot form sheet with residues from same contiguous strand
if confirmed_sheet_neighbor is not None:
if confirmed_sheet_neighbor not in (ci-1, ci+1): continue # cannot form sheet with neighbors of previously confirmed sheet bond
ss_array[idx] = 'S'
ss_array[ci] = 'S'
confirmed_sheet_neighbor = idx
nb_sheet_hbonds += 1
if nb_sheet_hbonds == 2:
double_s_idx.append(ci)
break
e_ss_array = np.zeros((3, len(seqi_list)), dtype=float)
for ssi, ssc in enumerate(('H', 'S', 'L')):
e_ss_array[ssi, ss_array == ssc] = self.ss_df.loc[ss_array == ssc, ssc] * ss_multiplier
e_ss_array[1, double_s_idx] = e_ss_array[1, double_s_idx] * 2
e_aa_array[np.tril_indices(len(seqi_list), -1)] = e_aa_array.T[np.tril_indices(len(seqi_list), -1)]
if self.finetune_structure:
imposer.set(self.coords, self.cm_coords) # superimpose on center-of-mass coords
imposer.run()
e_tag = imposer.get_rms()
e_ss_array[1, :] = 0 # set sheet modifier to 0
e_out = np.row_stack((np.zeros((self.seq_length, self.seq_length)),
e_ss_array,
                              np.zeros(self.seq_length)))
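        # --- hypothetical continuation (the source is truncated here).
        # individual_energies unpacks e_matrix as (emat, e_wat, e_dsb, e_tag,
        # e_reg), so a plausible ending would stack e_aa_array and tag_array
        # into e_out in the non-finetuning path and end with
        #   return e_out, e_wat, e_dsb, e_tag, e_reg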
#!/usr/bin/env python3
'''
This script reads in data from stdin and plots the distribution of the times of an item.
The data is plotted on a log scale and in the command line using ASCII characters.
Example data:
[
{"id":9,"start":"20211009T214645Z","end":"20211009T215503Z","tags":["Plane Aktivitaet mit Leuten","leute","obj","obj2","obj3","scheduled_today_custom"]},
{"id":8,"start":"20211009T215510Z","end":"20211009T220416Z","tags":["Clarify Bucket Items","clarify","obj","scheduled_today_custom"]},
{"id":7,"start":"20211009T220610Z","end":"20211009T221339Z","tags":["obj"]},
{"id":6,"start":"20211009T221340Z","end":"20211009T221946Z","tags":["obj","obj2","obj3","task"]},
{"id":5,"start":"20211009T222213Z","end":"20211009T223056Z","tags":["obj","obj2","obj3","task"]},
{"id":4,"start":"20211009T223056Z","end":"20211009T223417Z","tags":["Wiege dich","scheduled_today_custom"]}
]
'''
import sys
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import time
import termplotlib as tpl
# Disable warning WARNING: CPU random generator seem to be failing, disable hardware random number generation
# Read in the data
data_raw = ''
after_empty_line = False
for line in sys.stdin:
if after_empty_line:
# data.append(json.loads(line))
data_raw += line
if line == '\n':
after_empty_line = True
data = json.loads(data_raw)
# Check if last argument is a number.
# try:
# number_to_show_in_plot = int(sys.argv[-1])
# end_argument = len(sys.argv) - 1
# except:
# number_to_show_in_plot = None
# end_argument = len(sys.argv)
# print("sys.argv:", sys.argv)
# tags_for_filtering = sys.argv[1:end_argument]
# print("tags_for_filtering:", tags_for_filtering)
tags_for_filtering = sys.argv[1:]
# Filter the data
filtered_data = []
for entry in data:
if len(tags_for_filtering) > 0:
        tags_match = True
        for tag in tags_for_filtering:
            if tag not in entry['tags']:
                tags_match = False
                break
        if not tags_match:
            continue
filtered_data.append(entry)
data = filtered_data
# Plot the data
x_range = []
for entry in data:
# print("entry:", entry)
if 'end' not in entry:
# timestamp in %Y%m%dT%H%M%SZ format
date_now = datetime.datetime.utcnow()
entry['end'] = date_now.strftime('%Y%m%dT%H%M%SZ')
x_range.append((entry['start'], entry['end']))
x_range_start = []
x_range_end = []
for start, end in x_range:
x_range_start.append(datetime.datetime.strptime(start, '%Y%m%dT%H%M%SZ'))
x_range_end.append(datetime.datetime.strptime(end, '%Y%m%dT%H%M%SZ'))
x_range_start = np.array(x_range_start)
x_range_end = np.array(x_range_end)
durations = x_range_end - x_range_start
# Convert to seconds.
durations = np.array(durations.astype('timedelta64[s]').astype(int))
# Filter out inf values.
# Plot in the command line using termplotlib.
# Example usage for termplotlib
# import termplotlib as tpl
# import numpy as np
# rng = np.random.default_rng(123)
# sample = rng.standard_normal(size=1000)
# counts, bin_edges = np.histogram(sample, bins=40)
# fig = tpl.figure()
# fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)
# fig.show()
# Plot the seconds in the command line using termplotlib.
# Example usage for termplotlib
NUM_BINS = 40
durations_for_log = [x for x in durations if x > 0]
durations_log_base_10 = np.log10(durations_for_log)
durations_log_base_10 = np.array([x for x in durations_log_base_10 if -np.inf < x < np.inf])
counts, bin_edges = np.histogram(durations_log_base_10, bins=NUM_BINS)
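# --- hypothetical continuation (the script is truncated here). Following the
# termplotlib example quoted in the comments above:
fig = tpl.figure()
fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)
fig.show()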
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 23:08:32 2021
@author: jimlee
"""
import csv
import numpy as np
import math
# open the csv file
rawData = np.genfromtxt('train.csv', delimiter=',')
data = rawData[1:,3:] # data is ready, but needs to be reorganized
# Test NR
print(data[10,0])
''' Here change the NaN term to 0'''
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if(math.isnan(data[i,j])):
data[i,j] = 0
''' Here change the NaN term to 0'''
# Check if NR changed to 0.0
print(data[10,0])
'''
Now We Need To Change The Data To Some Form Like This:
Let x be the feature vector(dim = 18x1)
And x1_1_0 means that the feature vector on 1/1 0:00 and so on
[ x1_1_0 ... x1_1_23 x1_2_0 ... x1_2_23 ... x12_20_0 ... x12_20_23]
The dimension of the matrix must be 18x5760
'''
reorganizedData = np.zeros((18,5760))
startRowIndex = 0
startColumnIndex = 0
counter = 1
for i in range(data.shape[0]):
if counter % 18 == 0:
reorganizedData[:,startColumnIndex:startColumnIndex + 24] = data[startRowIndex:i + 1, :]
startRowIndex = i + 1
startColumnIndex = startColumnIndex + 24
counter += 1
'''Now We Have The ReorganizedData, We Have To Seperate the Train_x, Train_y from it'''
X = np.zeros((5652, 162)) # Train x
y_head = np.zeros((5652,1)) # Train y
for month in range(12):
for hour in range(471):
xi = []
for i in range(hour,hour + 9):
xi = np.append(xi,np.transpose(reorganizedData[:, month * 480 + i]))
y_head[month * 471 + hour, 0] = reorganizedData[9, month * 480 + hour + 9]
X[month * 471 + hour,:] = xi
''' The training data need to be normalized'''
for column in range(X.shape[1]):
X[:,column] = (X[:,column] - X[:,column].mean()) / math.sqrt(X[:,column].var())
''' Now we have successfully sample 5652 sets of training data. It's time to do the iteration'''
''' Define the way of training method'''
method = "ADAM_CUBIC_MODEL"
if method == "ADAGRAD":
print("ADAGRAD")
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
lr = 0.01
w = np.zeros((163,1))
prevGrad = np.zeros((163,1))
    epsilon = 1E-8  # for numerical stability
for i in range(1, 100000):
y = np.dot(X,w)
grad = 2 * (np.dot(np.transpose(X),y-y_head))
prevGrad += grad**2
#w = w - lr * grad / (np.sqrt(prevGrad / n))
        w -= lr * grad / (np.sqrt(prevGrad) + epsilon)  # epsilon keeps the division numerically stable
#w -= lr * grad
''' Calculate the error'''
if i % 1000 == 0:
print("Loss:",np.power(np.sum(np.power(y - y_head, 2 ))/ X.shape[0],0.5))
print(np.dot(np.transpose(y-y_head), (y-y_head)))
elif method == "ADAM":
print("ADAM")
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
lr = 0.1
w = np.zeros((163,1))
beta1 = 0.9
beta2 = 0.999
    epsilon = 1E-8  # for numerical stability
v = np.zeros([163,1])
s = np.zeros([163,1])
for i in range(1, 100000):
y = np.dot(X,w)
grad = 2 * (np.dot(np.transpose(X),y-y_head))
v = beta1 * v + (1 - beta1) * grad
s = beta2 * s + (1 - beta2) * grad ** 2
v_correction = v / (1 - beta1 ** i)
s_correction = s / (1 - beta2 ** i)
        w -= lr * v_correction / (np.sqrt(s_correction) + epsilon)
''' Calculate the error'''
if i % 1000 == 0:
print("Loss:",np.power(np.sum(np.power(y - y_head, 2 ))/ X.shape[0],0.5))
print(np.dot(np.transpose(y-y_head), (y-y_head)))
elif method == "ADAM_QUAGRATIC_MODEL":
X_2 = np.power(X,2)
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
X = np.hstack((X,X_2))
print("ADAM_QUAGRATIC_MODEL")
lr = 0.01
w = np.zeros((X.shape[1], 1))
v = np.zeros([X.shape[1],1])
s = np.zeros([X.shape[1],1])
beta1 = 0.9
beta2 = 0.999
    epsilon = 1E-8  # for numerical stability
for i in range(1, 100000):
#print(i,"th iteration")
#y = np.dot(X, w1) + np.dot(X_2, w2)
#print(y)
y = np.dot(X, w)
grad = 2 * (np.dot(np.transpose(X),y-y_head))
v = beta1 * v + (1 - beta1) * grad
s = beta2 * s + (1 - beta2) * grad ** 2
v_correction = v / (1 - beta1 ** i)
s_correction = s / (1 - beta2 ** i)
        w -= lr * v_correction / (np.sqrt(s_correction) + epsilon)
''' Calculate the error'''
if i % 1000 == 0:
print("Loss:",np.power(np.sum(np.power(y - y_head, 2 ))/ X.shape[0],0.5))
print(np.dot(np.transpose(y-y_head), (y-y_head)))
elif method == "ADAM_CUBIC_MODEL":
X_2 = np.power(X,2)
X_3= np.power(X,3)
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
X = np.hstack((X,X_2))
X = np.hstack((X,X_3))
print("ADAM_QUAGRATIC_MODEL")
lr = 0.1
w = np.zeros((X.shape[1], 1))
v = np.zeros([X.shape[1],1])
s = np.zeros([X.shape[1],1])
beta1 = 0.9
beta2 = 0.999
    epsilon = 1E-8  # for numerical stability
for i in range(1, 100000):
#print(i,"th iteration")
#y = np.dot(X, w1) + np.dot(X_2, w2)
#print(y)
y = np.dot(X, w)
grad = 2 * (np.dot(np.transpose(X),y-y_head))
v = beta1 * v + (1 - beta1) * grad
s = beta2 * s + (1 - beta2) * grad ** 2
v_correction = v / (1 - beta1 ** i)
s_correction = s / (1 - beta2 ** i)
        w -= lr * v_correction / (np.sqrt(s_correction) + epsilon)
''' Calculate the error'''
if i % 1000 == 0:
print("Loss:",np.power(np.sum(np.power(y - y_head, 2 ))/ X.shape[0],0.5))
print(np.dot(np.transpose(y-y_head), (y-y_head)))
elif method == "ADAGRAD_CUBIC_MODEL":
X_2 = np.power(X,2)
X_3= np.power(X,3)
X = np.concatenate((np.ones((X.shape[0], 1 )), X) , axis = 1).astype(float)
X = np.hstack((X,X_2))
X = np.hstack((X,X_3))
    w = np.zeros([X.shape[1],1])
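    # --- hypothetical continuation (the source is truncated here). A minimal
    # sketch mirroring the ADAGRAD branch above, applied to the cubic design
    # matrix:
    lr = 0.01
    prevGrad = np.zeros([X.shape[1], 1])
    for i in range(1, 100000):
        y = np.dot(X, w)
        grad = 2 * (np.dot(np.transpose(X), y - y_head))
        prevGrad += grad ** 2
        w -= lr * grad / (np.sqrt(prevGrad) + 1E-8)
        if i % 1000 == 0:
            print("Loss:", np.power(np.sum(np.power(y - y_head, 2)) / X.shape[0], 0.5))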
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import csv
import os.path
from download import download
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
def define_model(info: dict, level: str = "stock"):
"""
Define and return graphical model.
Parameters
----------
info: dict
Data information.
level: str
Level of the model; possible candidates are "stock", "industry", "sector" and "market".
"""
tt = info['tt']
order_scale = info['order_scale']
order = len(order_scale) - 1
num_sectors = info['num_sectors']
sec2ind_id = info['sector_industries_id']
ind_id = info['industries_id']
available_levels = ["market", "sector", "industry", "stock"]
if level not in available_levels:
raise Exception("Selected level is unknown. Please provide one of the following levels: {}.".format(available_levels))
m = [tfd.Normal(loc=tf.zeros([1, order + 1]), scale=4 * order_scale), # phi_m
tfd.Normal(loc=0, scale=4)] # psi_m
if level != "market":
m += [lambda psi_m, phi_m: tfd.Normal(loc=tf.repeat(phi_m, num_sectors, axis=0), scale=2 * order_scale), # phi_s
lambda phi_s, psi_m: tfd.Normal(loc=psi_m, scale=2 * tf.ones([num_sectors, 1]))] # psi_s
if level != "sector":
sec2ind_id = info['sector_industries_id']
m += [lambda psi_s, phi_s: tfd.Normal(loc=tf.gather(phi_s, sec2ind_id, axis=0), scale=order_scale), # phi_i
              lambda phi_i, psi_s: tfd.Normal(loc=tf.gather(psi_s, sec2ind_id, axis=0), scale=1)]  # psi_i
if level != "industry":
ind_id = info['industries_id']
m += [lambda psi_i, phi_i: tfd.Normal(loc=tf.gather(phi_i, ind_id, axis=0), scale=0.5 * order_scale), # phi
lambda phi, psi_i: tfd.Normal(loc=tf.gather(psi_i, ind_id, axis=0), scale=0.5)] # psi
if level == "market":
m += [lambda psi_m, phi_m: tfd.Normal(loc=tf.tensordot(phi_m, tt, axes=1), scale=tf.math.softplus(psi_m))] # y
if level == "sector":
m += [lambda psi_s, phi_s: tfd.Normal(loc=tf.tensordot(phi_s, tt, axes=1), scale=tf.math.softplus(psi_s))] # y
if level == "industry":
m += [lambda psi_i, phi_i: tfd.Normal(loc=tf.tensordot(phi_i, tt, axes=1), scale=tf.math.softplus(psi_i))] # y
if level == "stock":
m += [lambda psi, phi: tfd.Normal(loc=tf.tensordot(phi, tt, axes=1), scale=tf.math.softplus(psi))] # y
return tfd.JointDistributionSequentialAutoBatched(m)
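# Usage sketch (hypothetical `info` built as in the __main__ block below):
#   model = define_model(info, level="stock")
#   draw = model.sample()            # list with one tensor per model node
#   lp = model.log_prob(draw)        # scalar log joint density of the draw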
def training(logp: np.array, info: dict, learning_rate: float = 0.01, num_steps: int = 20000, plot_losses: bool = False):
"""
It performs sequential optimization over the model parameters via Adam optimizer, training at different levels to
provide sensible initial solutions at finer levels.
Parameters
----------
logp: np.array
Log-price at stock-level.
info: dict
Data information.
learning_rate: float
Adam's fixed learning rate.
num_steps: int
Adam's fixed number of iterations.
plot_losses: bool
If True, a losses decay plot is saved in the current directory.
Returns
-------
It returns trained parameters.
"""
optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
    num_steps_l = int(np.ceil(num_steps / 4))
# market
model = define_model(info, "market")
phi_m, psi_m = (tf.Variable(tf.zeros_like(model.sample()[:2][i])) for i in range(2))
loss_m = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, logp.mean(0, keepdims=1)]),
optimizer=optimizer, num_steps=num_steps_l)
# sector
model = define_model(info, "sector")
phi_m, psi_m = tf.constant(phi_m), tf.constant(psi_m)
phi_s, psi_s = (tf.Variable(tf.zeros_like(model.sample()[2:4][i])) for i in range(2))
logp_s = np.array([logp[np.where(np.array(info['sectors_id']) == k)[0]].mean(0) for k in range(info['num_sectors'])])
loss_s = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, logp_s]),
optimizer=optimizer, num_steps=num_steps_l)
# industry
model = define_model(info, "industry")
phi_s, psi_s = tf.constant(phi_s), tf.constant(psi_s)
phi_i, psi_i = (tf.Variable(tf.zeros_like(model.sample()[4:6][i])) for i in range(2))
logp_i = np.array([logp[np.where(np.array(info['industries_id']) == k)[0]].mean(0) for k in range(info['num_industries'])])
loss_i = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, logp_i]),
optimizer=optimizer, num_steps=num_steps_l)
# stock
model = define_model(info, "stock")
phi_i, psi_i = tf.constant(phi_i), tf.constant(psi_i)
phi, psi = (tf.Variable(tf.zeros_like(model.sample()[6:8][i])) for i in range(2))
loss = tfp.math.minimize(lambda: -model.log_prob([phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi, logp]),
optimizer=optimizer, num_steps=num_steps_l)
if plot_losses:
fig_name = 'losses_decay.png'
fig = plt.figure(figsize=(20, 3))
plt.subplot(141)
plt.title("market-level", fontsize=12)
plt.plot(loss_m)
plt.subplot(142)
plt.title("sector-level", fontsize=12)
plt.plot(loss_s)
plt.subplot(143)
plt.title("industry-level", fontsize=12)
plt.plot(loss_i)
plt.subplot(144)
plt.title("stock-level", fontsize=12)
plt.plot(loss)
plt.legend(["loss decay"], fontsize=12, loc="upper right")
plt.xlabel("iteration", fontsize=12)
fig.savefig(fig_name, dpi=fig.dpi)
print('Losses decay plot has been saved in this directory as {}.'.format(fig_name))
return phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi
def softplus(x: np.array):
"""
    Softplus: a smooth map from real to positive numbers, log(1 + exp(x)).
Parameters
----------
x: np.array
Real value.
"""
return np.log(1 + np.exp(x))
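# Note: np.log(1 + np.exp(x)) overflows for large x; np.logaddexp(0, x) is an
# equivalent, numerically stable alternative.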
def order_selection(logp: np.array, info: dict, orders: np.array = np.arange(1, 14), horizon: int = 5):
"""
    It selects the polynomial order by backtesting: each candidate order is
    trained on all but the last `horizon` days and scored on its standardized
    prediction error over those days.
Parameters
----------
logp: np.array
Log-prices at stock-level.
info: dict
Data information.
orders: np.array
Array of candidate orders.
horizon: int
Number of days to evaluate prediction.
"""
print("\nModel selection in progress. This can take a few minutes...")
t = logp[:, :-horizon].shape[1]
min_loss = np.inf
count = 0
for i, order in enumerate(orders):
info['tt'] = (np.linspace(1 / t, 1, t) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
info['order_scale'] = np.linspace(1 / (order + 1), 1, order + 1)[::-1].astype('float32')[None, :]
# training the model
phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = training(logp[:, :-horizon], info)
# construct loss
tt_pred = ((1 + (np.arange(1, 1 + horizon) / t)) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
logp_pred = np.dot(phi.numpy(), tt_pred)
std_logp_pred = softplus(psi.numpy())
scores = (logp_pred - logp[:, -horizon:]) / std_logp_pred
loss = np.abs(np.mean(scores ** 2) - 1)
print("Loss value for backtested polynomial model of order {}: {}.".format(order, loss))
if i > 0 and loss > min_loss:
count += 1
else:
min_loss = loss
min_order = order
count = 0
if count == 3:
break
print("Model selection completed. Volatile will use a polynomial model of degree {}.".format(min_order))
return min_order
if __name__ == '__main__':
cli = ArgumentParser('Volatile: your day-to-day trading companion.',
formatter_class=ArgumentDefaultsHelpFormatter)
cli.add_argument('-s', '--symbols', type=str, nargs='+', help='List of symbols.')
cli.add_argument('--save-table', action='store_true',
help='Save prediction table in csv format.')
cli.add_argument('--no-plots', action='store_true',
help='Plot estimates with their uncertainty over time.')
cli.add_argument('--plot-losses', action='store_true',
help='Plot loss function decay over training iterations.')
args = cli.parse_args()
today = dt.date.today().strftime("%Y-%m-%d")
print('\nDownloading all available closing prices in the last year...')
if args.symbols is None:
with open("symbols_list.txt", "r") as my_file:
args.symbols = my_file.readlines()[0].split(" ")
data = download(args.symbols)
tickers = data["tickers"]
num_stocks, t = data['logp'].shape
# find unique names of sectors
usectors = np.unique(data['sectors'])
num_sectors = len(usectors)
# provide sector IDs at stock-level
sectors_id = [np.where(usectors == sector)[0][0] for sector in data['sectors']]
# find unique names of industries and store indices
uindustries, industries_idx = np.unique(data['industries'], return_index=True)
num_industries = len(uindustries)
# provide industry IDs at stock-level
industries_id = [np.where(uindustries == industry)[0][0] for industry in data['industries']]
# provide sector IDs at industry-level
sector_industries_id = np.array(sectors_id)[industries_idx].tolist()
# place relevant information in dictionary
info = dict(num_sectors=num_sectors, num_industries=num_industries, sector_industries_id=sector_industries_id,
industries_id=industries_id, sectors_id=sectors_id)
# how many days to look ahead when comparing the current price against a prediction
horizon = 5
# order of the polynomial
order = order_selection(data['logp'], info)
print("\nTraining the model...")
# times corresponding to trading dates in the data
info['tt'] = (np.linspace(1 / t, 1, t) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
# reweighing factors for parameters corresponding to different orders of the polynomial
info['order_scale'] = np.linspace(1 / (order + 1), 1, order + 1)[::-1].astype('float32')[None, :]
# training the model
phi_m, psi_m, phi_s, psi_s, phi_i, psi_i, phi, psi = training(data['logp'], info, plot_losses=args.plot_losses)
# calculate stock-level estimators of log-prices
logp_est = np.dot(phi.numpy(), info['tt'])
std_logp_est = softplus(psi.numpy())
# calculate stock-level estimators of prices
p_est = np.exp(logp_est + std_logp_est ** 2 / 2)
std_p_est = np.sqrt(np.exp(2 * logp_est + std_logp_est ** 2) * (np.exp(std_logp_est ** 2) - 1))
# calculate stock-level predictions of log-prices
tt_pred = ((1 + (np.arange(1 + horizon) / t)) ** np.arange(order + 1).reshape(-1, 1)).astype('float32')
logp_pred = np.dot(phi.numpy(), tt_pred)
std_logp_pred = softplus(psi.numpy())
# calculate stock-level prediction of prices
p_pred = np.exp(logp_pred + std_logp_pred ** 2 / 2)
std_p_pred = np.sqrt(np.exp(2 * logp_pred + std_logp_pred ** 2) * (np.exp(std_logp_pred ** 2) - 1))
# calculate industry-level estimators of log-prices
logp_ind_est = np.dot(phi_i.numpy(), info['tt'])
std_logp_ind_est = softplus(psi_i.numpy())
# calculate industry-level estimators of prices
p_ind_est = np.exp(logp_ind_est + std_logp_ind_est ** 2 / 2)
    std_p_ind_est = np.sqrt(np.exp(2 * logp_ind_est + std_logp_ind_est ** 2) * (np.exp(std_logp_ind_est ** 2) - 1))
# This routine computes the first-order
# transit timing variations in Agol & Deck (2015). Please
# cite the paper if you make use of this code in your research.
import numpy as np
import matplotlib.pyplot as plt
class Planet(object):
def __init__(self, mass_ratio=None, trans0=None, period=None, ecos=None,
esin=None):
self.mass_ratio = mass_ratio
self.trans0 = trans0
self.period = period
self.ecos = ecos
self.esin = esin
def call_ttv(jmax):
"""
This routine gives an example of a call of compute_ttv.py
which computes the first-order eccentricity TTVs, from
Agol & Deck (2015).
It uses parameters appropriate for the outer two planets
of Kepler-62e/f.
Parameters
----------
jmax: maximum j to evaluate
"""
    data = np.loadtxt('kepler62ef_planets.txt', delimiter=',')
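    # --- hypothetical continuation (the source is truncated here). Assuming
    # one row per planet with columns matching the Planet constructor
    # (mass_ratio, trans0, period, ecos, esin):
    inner = Planet(*data[0])
    outer = Planet(*data[1])
    # ...the TTV computation for `jmax` harmonics would follow here.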
"""Utility functions for computing useful statistics."""
import numpy as np
def pearson_residuals(y, pred):
"""Compute Pearson residuals.
Reference:
https://web.as.uky.edu/statistics/users/pbreheny/760/S11/notes/4-12.pdf
:param array-like[int] y: target labels. 1 is positive label, 0 is negative
label
:param array-like[float] pred: predicted labels.
:returns: pearson residual.
:rtype: array-like[float]
"""
    y, pred = np.array(y), np.array(pred)
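    # --- hypothetical continuation (the source is truncated here). The
    # standard Pearson residual for a Bernoulli outcome is
    # (y - p) / sqrt(p * (1 - p)):
    return (y - pred) / np.sqrt(pred * (1 - pred))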
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-08-21 at 12:28
@author: cook
"""
from astropy.table import Table
from astropy import constants as cc
from astropy import units as uu
import numpy as np
import os
from scipy.optimize import curve_fit
import warnings
from apero import core
from apero import lang
from apero.core import constants
from apero.core import math as mp
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'science.rv.general.py'
__INSTRUMENT__ = 'None'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get function string
display_func = drs_log.display_func
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# Speed of light
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light = cc.c.to(uu.km / uu.s).value
# =============================================================================
# Define functions
# =============================================================================
def measure_fp_peaks(params, props, limit, normpercent):
"""
Measure the positions of the FP peaks
Returns the pixels positions and Nth order of each FP peak
    :param params: parameter dictionary, ParamDict containing constants
Must contain at least:
drift_peak_border_size: int, the border size (edges in
x-direction) for the FP fitting
algorithm
drift_peak_fpbox_size: int, the box half-size (in pixels) to
fit an individual FP peak to - a
gaussian will be fit to +/- this size
from the center of the FP peak
drift_peak_peak_sig_lim: dictionary, the sigma above the median
that a peak must have to be recognised
as a valid peak (before fitting a
gaussian) dictionary must have keys
equal to the lamp types (hc, fp)
drift_peak_inter_peak_spacing: int, the minimum spacing between
peaks in order to be recognised
as a valid peak (before fitting
a gaussian)
log_opt: string, log option, normally the program name
    :param props: parameter dictionary, ParamDict containing data
Must contain at least:
speref: numpy array (2D), the reference spectrum
wave: numpy array (2D), the wave solution image
lamp: string, the lamp type (either 'hc' or 'fp')
    :return props: parameter dictionary, the updated parameter dictionary
Adds/updates the following:
ordpeak: numpy array (1D), the order number for each valid FP
peak
                xpeak: numpy array (1D), the central position of each gaussian
                    fit to a valid FP peak
                ewpeak: numpy array (1D), the FWHM of each gaussian fit
                    to a valid FP peak
vrpeak: numpy array (1D), the radial velocity drift for each
valid FP peak
llpeak: numpy array (1D), the delta wavelength for each valid
FP peak
amppeak: numpy array (1D), the amplitude for each valid FP peak
"""
    func_name = __NAME__ + '.measure_fp_peaks()'
# get the reference data and the wave data
speref = np.array(props['SPEREF'])
wave = props['WAVE']
# storage for order of peaks
allpeaksize = []
allordpeak = []
allxpeak = []
allewpeak = []
allvrpeak = []
allllpeak = []
allamppeak = []
alldcpeak = []
allshapepeak = []
# loop through the orders
for order_num in range(speref.shape[0]):
# storage for order of peaks
ordpeak = []
xpeak = []
ewpeak = []
vrpeak = []
llpeak = []
amppeak = []
dcpeak = []
shapepeak = []
# storage of warnings
warn_dict = dict()
# set number of peaks rejected to zero
nreject = 0
# set a counter for total number of peaks
ipeak = 0
# get the pixels for this order
tmp = np.array(speref[order_num, :])
# define indices
index = np.arange(len(tmp))
# ------------------------------------------------------------------
# normalize the spectrum
tmp = tmp / np.nanpercentile(tmp, normpercent)
# ------------------------------------------------------------------
# find the peaks
with warnings.catch_warnings(record=True) as w:
peakmask = (tmp[1:-1] > tmp[2:]) & (tmp[1:-1] > tmp[:-2])
peakpos = np.where(peakmask)[0]
# work out the FP width for this order
size = int(np.nanmedian(peakpos[1:] - peakpos[:-1]))
# ------------------------------------------------------------------
# mask for finding maximum peak
mask = np.ones_like(tmp)
# mask out the edges
mask[:size + 1] = 0
mask[-(size + 1):] = 0
# ------------------------------------------------------------------
# loop for peaks that are above a value of limit
while mp.nanmax(mask * tmp) > limit:
# --------------------------------------------------------------
# find peak along the order
maxpos = np.nanargmax(mask * tmp)
maxtmp = tmp[maxpos]
# --------------------------------------------------------------
# get the values around the max position
index_peak = index[maxpos - size: maxpos + size]
tmp_peak = tmp[maxpos - size: maxpos + size]
# --------------------------------------------------------------
# mask out this peak for next iteration of while loop
mask[maxpos - (size // 2):maxpos + (size // 2) + 1] = 0
# --------------------------------------------------------------
# return the initial guess and the best fit
p0, gg, _, warns = fit_fp_peaks(index_peak, tmp_peak, size)
# --------------------------------------------------------------
# only keep peaks within +/- 1 pixel of original peak
# (gaussian fit is to find sub-pixel value)
cond = np.abs(maxpos - gg[1]) < 1
if cond:
# work out the radial velocity of the peak
lambefore = wave[order_num, maxpos - 1]
lamafter = wave[order_num, maxpos + 1]
deltalam = lamafter - lambefore
# get the radial velocity
waveomax = wave[order_num, maxpos]
radvel = speed_of_light_ms * deltalam / (2.0 * waveomax)
# add to storage
ordpeak.append(order_num)
xpeak.append(gg[1])
ewpeak.append(gg[2])
vrpeak.append(radvel)
llpeak.append(deltalam)
amppeak.append(maxtmp)
shapepeak.append(gg[3])
dcpeak.append(gg[4])
else:
# add to rejected
nreject += 1
# iterator
ipeak += 1
# --------------------------------------------------------------
# deal with warnings
if warns is not None:
if warns in warn_dict:
warn_dict[warns] += 1
else:
warn_dict[warns] = 1
# --------------------------------------------------------------
# log how many FPs were found and how many rejected
wargs = [order_num, ipeak, nreject]
WLOG(params, '', TextEntry('40-018-00001', args=wargs))
# ------------------------------------------------------------------
# print warnings
for key in list(warn_dict.keys()):
wargs = [warn_dict[key], key]
WLOG(params, 'warning', TextEntry('00-018-00001', args=wargs))
# ------------------------------------------------------------------
# add values to all storage (and sort by xpeak)
indsort = np.argsort(xpeak)
allordpeak.append(np.array(ordpeak)[indsort])
allxpeak.append(np.array(xpeak)[indsort])
allewpeak.append(np.array(ewpeak)[indsort])
allvrpeak.append(np.array(vrpeak)[indsort])
allllpeak.append(np.array(llpeak)[indsort])
allamppeak.append(np.array(amppeak)[indsort])
allshapepeak.append(np.array(shapepeak)[indsort])
alldcpeak.append(np.array(dcpeak)[indsort])
allpeaksize.append(size)
# store values in loc
props['ORDPEAK'] = np.concatenate(allordpeak).astype(int)
props['XPEAK'] = np.concatenate(allxpeak)
props['PEAK2PEAK'] = np.concatenate(allewpeak)
props['VRPEAK'] = np.concatenate(allvrpeak)
props['LLPEAK'] = np.concatenate(allllpeak)
props['AMPPEAK'] = np.concatenate(allamppeak)
props['DCPEAK'] = np.concatenate(alldcpeak)
props['SHAPEPEAK'] = np.concatenate(allshapepeak)
props['PEAKSIZE'] = np.array(allpeaksize)
# set source
keys = ['ORDPEAK', 'XPEAK', 'PEAK2PEAK', 'VRPEAK', 'LLPEAK', 'AMPPEAK',
'DCPEAK', 'SHAPEPEAK', 'PEAKSIZE']
props.set_sources(keys, func_name)
# Log the total number of FP lines found
wargs = [len(props['XPEAK'])]
WLOG(params, 'info', TextEntry('40-018-00002', args=wargs))
# return the property parameter dictionary
return props
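# Illustrative, standalone sketch (not part of the pipeline) of the
# local-maximum peak detection used in create_drift_file above:
#
#     import numpy as np
#     tmp = np.sin(np.linspace(0, 40 * np.pi, 4096)) ** 2
#     peakmask = (tmp[1:-1] > tmp[2:]) & (tmp[1:-1] > tmp[:-2])
#     peakpos = np.where(peakmask)[0] + 1  # +1: the mask indexes tmp[1:-1]
#     size = int(np.median(peakpos[1:] - peakpos[:-1]))  # ~ the FP period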
def fit_fp_peaks(x, y, size, return_model=False):
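    """
    Fit a single FP peak with a modified Airy function (mp.ea_airy_function)

    :param x: numpy array (1D), the pixel positions around the peak
    :param y: numpy array (1D), the normalised flux around the peak
    :param size: int, the estimated FP period, used as the initial guess
                 for the 'period' parameter (and to set its bounds)
    :param return_model: bool, if True also return the evaluated model

    :return p0: list, the initial guess [amp, pos, period, shape, dc]
    :return popt: list or numpy array, the best-fit parameters (all NaN if
                  the bounds were invalid or the fit failed)
    :return pcov: numpy array or None, the covariance matrix of the fit
    :return warns: string or None, any warning messages generated
    :return model: numpy array (1D), the model evaluated at x (only
                   returned if return_model=True)
    """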
# storage of warnings
warns = None
    # get the FP peak model function (Etienne's modified Airy function)
ea_airy = mp.ea_airy_function
# set up initial guess
pnames = ['amp', 'pos', 'period', 'shape', 'dc']
# [amp, position, period, exponent, zero point]
p0 = [np.max(y) - np.min(y), np.median(x), size, 1.5,
np.max([0, np.min(y)])]
# set up the bounds
lowerbounds = [0.5 * p0[0], p0[1] - 2, 0.7 * p0[2], 1.0, 0.0]
upperbounds = [2.0 * p0[0], p0[1] + 2, 1.3 * p0[2], 10.0, 0.5 * p0[0]]
bounds = [lowerbounds, upperbounds]
# test bounds make sense
for p_it in range(len(lowerbounds)):
if lowerbounds[p_it] >= upperbounds[p_it]:
if warns is None:
warns = ''
warns += ('\nBoundError: Lower bound {0} incorrect (lower={1} '
'upper={2})'.format(pnames[p_it], lowerbounds[p_it],
upperbounds[p_it]))
if p0[p_it] < lowerbounds[p_it] or p0[p_it] > upperbounds[p_it]:
if warns is None:
warns = ''
            warns += ('\nBoundError: Initial guess for {0} out of bounds '
'(guess={1} lower={2} upper={3})'
''.format(pnames[p_it], p0[p_it],
lowerbounds[p_it], upperbounds[p_it]))
# deal with bad bounds
if warns is not None:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
model = np.repeat([np.nan], len(x))
else:
        # try to fit Etienne's Airy function
try:
with warnings.catch_warnings(record=True) as _:
popt, pcov = curve_fit(ea_airy, x, y, p0=p0, bounds=bounds)
model = ea_airy(x, *popt)
except ValueError as e:
# log that ydata or xdata contains NaNs
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
except RuntimeError as e:
popt = [np.nan, np.nan, np.nan, np.nan, np.nan]
pcov = None
warns = '{0}: {1}'.format(type(e), e)
model = np.repeat([np.nan], len(x))
# deal with returning model
if return_model:
return p0, popt, pcov, warns, model
else:
# return the guess and the best fit
return p0, popt, pcov, warns
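# Example usage sketch (illustrative only; assumes a roughly gaussian-shaped
# peak, which the Airy-like model can also fit near its centre):
#
#     x = np.arange(40.0)
#     y = 0.1 + np.exp(-0.5 * ((x - 19.7) / 3.0) ** 2)
#     p0, popt, pcov, warns = fit_fp_peaks(x, y, size=20)
#     # if warns is None, popt[1] holds the sub-pixel peak centre (~19.7)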
def remove_wide_peaks(params, props, cutwidth):
"""
    Remove FP peaks that are too wide, then remove double-fitted lines
    (pairs of peaks closer than half the FP period of their order)

    :param params: parameter dictionary, ParamDict containing constants
    :param props: parameter dictionary, ParamDict containing data
        Must contain at least:
            ORDPEAK: numpy array (1D), the order number for each valid FP
                     peak
            XPEAK: numpy array (1D), the central position of each fit to a
                   valid FP peak
            PEAK2PEAK: numpy array (1D), the fitted width (period) of each
                       valid FP peak
            VRPEAK: numpy array (1D), the radial velocity drift for each
                    valid FP peak
            LLPEAK: numpy array (1D), the delta wavelength for each valid
                    FP peak
            AMPPEAK: numpy array (1D), the amplitude for each valid FP peak
            SPEREF: numpy array (2D), the reference spectrum
            PEAKSIZE: numpy array (1D), the estimated FP period per order
    :param cutwidth: float, the maximum allowed peak width - peaks with
                     PEAK2PEAK >= cutwidth are removed

    :return props: parameter dictionary, the updated parameter dictionary
        Adds/updates the following, each masked to remove wide and
        double-fitted peaks:
            ORDPEAK, XPEAK, PEAK2PEAK, VRPEAK, LLPEAK, AMPPEAK
        and stores the pre-masking positions in XPEAK_OLD and ORDPEAK_OLD
    """
func_name = __NAME__ + '.remove_wide_peaks()'
# define a mask to cut out wide peaks
mask = np.array(props['PEAK2PEAK']) < cutwidth
# apply mask
props['ORDPEAK'] = props['ORDPEAK'][mask]
props['XPEAK'] = props['XPEAK'][mask]
props['PEAK2PEAK'] = props['PEAK2PEAK'][mask]
props['VRPEAK'] = props['VRPEAK'][mask]
props['LLPEAK'] = props['LLPEAK'][mask]
props['AMPPEAK'] = props['AMPPEAK'][mask]
# check for and remove double-fitted lines
# save old position
props['XPEAK_OLD'] = np.copy(props['XPEAK'])
props['ORDPEAK_OLD'] = np.copy(props['ORDPEAK'])
# set up storage for good lines
ordpeak_k, xpeak_k, ewpeak_k, vrpeak_k = [], [], [], []
llpeak_k, amppeak_k = [], []
# loop through the orders
for order_num in range(np.shape(props['SPEREF'])[0]):
# set up mask for the order
gg = props['ORDPEAK'] == order_num
# get the xvalues
xpeak = props['XPEAK'][gg]
# get the amplitudes
amppeak = props['AMPPEAK'][gg]
# get peak spacing
peak_spacing = props['PEAKSIZE'][order_num] / 2
# get the points where two peaks are spaced by < peak_spacing
ind = np.argwhere(xpeak[1:] - xpeak[:-1] < peak_spacing)
# get the indices of the second peak of each pair
ind2 = ind + 1
# initialize mask with the same size as xpeak
xmask = np.ones(len(xpeak), dtype=bool)
# mask the peak with the lower amplitude of the two
for i in range(len(ind)):
if amppeak[ind[i]] < amppeak[ind2[i]]:
xmask[ind[i]] = False
else:
xmask[ind2[i]] = False
# save good lines
ordpeak_k += list(props['ORDPEAK'][gg][xmask])
xpeak_k += list(props['XPEAK'][gg][xmask])
ewpeak_k += list(props['PEAK2PEAK'][gg][xmask])
vrpeak_k += list(props['VRPEAK'][gg][xmask])
llpeak_k += list(props['LLPEAK'][gg][xmask])
amppeak_k += list(props['AMPPEAK'][gg][xmask])
# replace FP peak arrays in loc
props['ORDPEAK'] = np.array(ordpeak_k)
props['XPEAK'] = np.array(xpeak_k)
props['PEAK2PEAK'] = np.array(ewpeak_k)
props['VRPEAK'] = np.array(vrpeak_k)
props['LLPEAK'] = np.array(llpeak_k)
props['AMPPEAK'] = np.array(amppeak_k)
# append this function to sources
    keys = ['ORDPEAK', 'XPEAK', 'PEAK2PEAK', 'VRPEAK', 'LLPEAK', 'AMPPEAK']
props.append_sources(keys, func_name)
# log number of lines removed for suspicious width
wargs = [mp.nansum(~mask)]
WLOG(params, 'info', TextEntry('40-018-00003', args=wargs))
# log number of lines removed as double-fitted
wargs = [len(props['XPEAK_OLD']) - len(props['XPEAK'])]
WLOG(params, 'info', TextEntry('40-018-00004', args=wargs))
# return props
return props
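# Sketch of the double-fit rejection rule above (illustrative values):
#
#     xpeak = np.array([10.0, 10.3, 25.0])
#     amppeak = np.array([1.0, 0.4, 0.9])
#     peak_spacing = 1.0
#     ind = np.argwhere(xpeak[1:] - xpeak[:-1] < peak_spacing)   # -> [[0]]
#     # pair (0, 1) is too close; amppeak[1] < amppeak[0], so the weaker
#     # peak at x=10.3 is masked out and only 10.0 and 25.0 survive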
def get_ccf_mask(params, filename, mask_width, mask_units='nm'):
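    """
    Load a CCF line mask and return the line widths, line centers and
    weights, with the wavelength quantities converted to nanometers

    :param params: ParamDict, parameter dictionary of constants
    :param filename: string, the CCF mask file to load
    :param mask_width: float, if > 0 the line widths are overridden with a
                       constant velocity width (mask_width * ll_mask_s / c)
    :param mask_units: string, name of an astropy unit (e.g. 'nm', 'AA')
                       giving the units of the wavelengths in the mask file

    :return ll_mask_d: numpy array (1D), the width of each line [nm]
    :return ll_mask_ctr: numpy array (1D), the center of each line [nm]
    :return w_mask: numpy array (1D), the weight of each line
    """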
func_name = __NAME__ + '.get_ccf_mask()'
# load table
table, absfilename = drs_data.load_ccf_mask(params, filename=filename)
# convert to floats
ll_mask_e = np.array(table['ll_mask_e']).astype(float)
ll_mask_s = np.array(table['ll_mask_s']).astype(float)
# calculate the difference in mask_e and mask_s
ll_mask_d = ll_mask_e - ll_mask_s
ll_mask_ctr = ll_mask_s + ll_mask_d * 0.5
    # if mask_width > 0 replace ll_mask_d with a constant velocity width:
    # mask_width * ll_mask_s / c
if mask_width > 0:
ll_mask_d = mask_width * ll_mask_s / speed_of_light
# make w_mask an array
w_mask = np.array(table['w_mask']).astype(float)
# ----------------------------------------------------------------------
# deal with the units of ll_mask_d and ll_mask_ctr
# must be returned in nanometers
# ----------------------------------------------------------------------
# get unit object from mask units string
try:
unit = getattr(uu, mask_units)
except Exception as e:
# log error
eargs = [mask_units, type(e), e, func_name]
WLOG(params, 'error', TextEntry('09-020-00002', args=eargs))
return None, None, None
# add units
ll_mask_d = ll_mask_d * unit
ll_mask_ctr = ll_mask_ctr * unit
# convert to nanometers
ll_mask_d = ll_mask_d.to(uu.nm).value
ll_mask_ctr = ll_mask_ctr.to(uu.nm).value
# ----------------------------------------------------------------------
# return the size of each pixel, the central point of each pixel
# and the weight mask
return ll_mask_d, ll_mask_ctr, w_mask
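# note: the unit conversion in get_ccf_mask goes through astropy.units
# (imported as uu), e.g. (1.0 * uu.AA).to(uu.nm).value == 0.1, so masks
# defined in Angstroms or microns are handled transparently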
def delta_v_rms_2d(spe, wave, sigdet, threshold, size):
"""
Compute the photon noise uncertainty for all orders (for the 2D image)
:param spe: numpy array (2D), the extracted spectrum
size = (number of orders by number of columns (x-axis))
:param wave: numpy array (2D), the wave solution for each pixel
:param sigdet: float, the read noise (sigdet) for calculating the
noise array
:param threshold: float, upper limit for pixel values, above this limit
pixels are regarded as saturated
:param size: int, size (in pixels) around saturated pixels to also regard
as bad pixels
    :return dvrms2: numpy array (1D), the photon noise for each order (squared)
    :return weightedmean: float, weighted mean photon noise across all orders
    :return weightedmeanorder: numpy array (1D), the photon noise per order
    """
# flag (saturated) fluxes above threshold as "bad pixels"
with warnings.catch_warnings(record=True) as _:
flag = spe < threshold
# flag all fluxes around "bad pixels" (inside +/- size of the bad pixel)
for i_it in range(1, 2 * size, 1):
flag[:, size:-size] *= flag[:, i_it: i_it - 2 * size]
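    # (the shifted-slice products AND together the flags of all neighbours,
    #  so a pixel survives only if nothing within +/- size of it saturates)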
# get the wavelength normalised to the wavelength spacing
nwave = wave[:, 1:-1] / (wave[:, 2:] - wave[:, :-2])
# get the flux + noise array
sxn = (spe[:, 1:-1] + sigdet ** 2)
# get the flux difference normalised to the flux + noise
nspe = (spe[:, 2:] - spe[:, :-2]) / sxn
# get the mask value
maskv = flag[:, 2:] * flag[:, 1:-1] * flag[:, :-2]
# get the total per order
tot = mp.nansum(sxn * ((nwave * nspe) ** 2) * maskv, axis=1)
# convert to dvrms2
with warnings.catch_warnings(record=True) as _:
dvrms2 = (speed_of_light_ms ** 2) / abs(tot)
# weighted mean of dvrms2 values
weightedmean = 1. / np.sqrt(mp.nansum(1.0 / dvrms2))
# per order value
weightedmeanorder = np.sqrt(dvrms2)
# return dv rms and weighted mean
return dvrms2, weightedmean, weightedmeanorder
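# Minimal usage sketch (illustrative only), computing the photon-noise RV
# limit of a synthetic flat spectrum:
#
#     spe = np.random.poisson(1000.0, size=(3, 512)).astype(float)
#     wave = np.tile(np.linspace(1500.0, 1520.0, 512), (3, 1))
#     dvrms2, wmean, wmeanorder = delta_v_rms_2d(spe, wave, sigdet=8.0,
#                                                threshold=1e5, size=5)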
def remove_telluric_domain(params, recipe, infile, fiber, **kwargs):
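    """
    Set to NaN all pixels of the input e2ds image where the telluric
    reconstruction (TELLU_RECON) is below the CCF_TELLU_THRES threshold,
    removing the heavily telluric-absorbed domain before the CCF is computed

    :param params: ParamDict, parameter dictionary of constants
    :param recipe: the recipe instance calling this function
    :param infile: the input telluric-corrected file instance
    :param fiber: string, the fiber name

    :return image: numpy array (2D), the image with the telluric domain
                   set to NaN
    """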
func_name = __NAME__ + '.remove_telluric_domain()'
# get parameters from params/kwargs
ccf_tellu_thres = pcheck(params, 'CCF_TELLU_THRES', 'ccf_tellu_thres',
kwargs, func_name)
# get the image
image = np.array(infile.data)
# get extraction type from the header
ext_type = infile.get_key('KW_EXT_TYPE', dtype=str)
    # get the input file (assumed to be the first file from the header)
e2dsfiles = infile.read_header_key_1d_list('KW_INFILE1', dim1=None,
dtype=str)
e2dsfilename = e2dsfiles[0]
# construct absolute path for the e2ds file
e2dsabsfilename = os.path.join(infile.path, e2dsfilename)
# check that e2ds file exists
if not os.path.exists(e2dsabsfilename):
eargs = [infile.filename, ext_type, e2dsabsfilename]
WLOG(params, 'error', TextEntry('09-020-00003', args=eargs))
# get infile
e2dsinst = core.get_file_definition(ext_type, params['INSTRUMENT'],
kind='red')
# construct e2ds file
e2dsfile = e2dsinst.newcopy(recipe=recipe, fiber=fiber)
e2dsfile.set_filename(e2dsfilename)
# get recon file
reconinst = core.get_file_definition('TELLU_RECON', params['INSTRUMENT'],
kind='red')
# construct recon file
reconfile = reconinst.newcopy(recipe=recipe, fiber=fiber)
reconfile.construct_filename(params, infile=e2dsfile)
# check recon file exists
if not os.path.exists(reconfile.filename):
eargs = [infile.filename, reconfile.name, e2dsfile.filename]
WLOG(params, 'error', TextEntry('09-020-00003', args=eargs))
# read recon file
reconfile.read_file()
# find all places below threshold
with warnings.catch_warnings(record=True) as _:
keep = reconfile.data > ccf_tellu_thres
# set all bad data to NaNs
image[~keep] = np.nan
# return in file
return image
def fill_e2ds_nans(params, image, **kwargs):
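    """
    Fill NaN pixels of an e2ds image, order by order, with a gaussian-
    weighted mean of the surrounding valid pixels (a normalised convolution)

    :param params: ParamDict, parameter dictionary of constants
    :param image: numpy array (2D), the e2ds image to fill

    :return image2: numpy array (2D), a copy of the image with NaNs filled
    """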
func_name = __NAME__ + '.fill_e2ds_nans()'
# get parameters from params/kwargs
kernel_size = pcheck(params, 'CCF_FILL_NAN_KERN_SIZE', 'kernel_size',
kwargs, func_name)
kernel_res = pcheck(params, 'CCF_FILL_NAN_KERN_RES', 'kernel_res',
kwargs, func_name)
# check whether we have NaNs
if np.sum(np.isnan(image)) == 0:
return image
# create a kernel to fill in the NaN gaps
xker = np.arange(-kernel_size, kernel_size + kernel_res, kernel_res)
kernel = np.exp(-0.5 * (xker ** 2))
kernel /= np.sum(kernel)
# log that NaNs were found
WLOG(params, 'warning', TextEntry('10-020-00002'))
# copy original image
image2 = np.array(image)
# loop around orders
for order_num in np.arange(image.shape[0]):
# get the vector for this order
oimage = np.array(image2[order_num])
# find all the nan pixels in this order
nanmask = np.isnan(oimage)
# convert the nanmask to floats (for convolution)
floatmask = (~nanmask).astype(float)
# set the NaN values in image to zero
oimage[nanmask] = 0.0
# convolve the NaN mask with the kernel
smooth_mask = np.convolve(floatmask, kernel, mode='same')
smooth_data = np.convolve(oimage, kernel, mode='same')
# calculate the smooth data (this is what is replaced)
smooth = smooth_data / smooth_mask
# set the NaN values to the smooth value
image2[order_num][nanmask] = smooth[nanmask]
# return the filled e2ds
return image2
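# The filling above is a normalised convolution: convolving both the data
# (NaNs zeroed) and the validity mask with the same kernel and dividing
# recovers a local weighted mean at the NaN positions, e.g. in miniature:
#
#     v = np.array([1.0, np.nan, 3.0])
#     k = np.array([0.25, 0.5, 0.25])
#     m = np.convolve(np.isfinite(v).astype(float), k, mode='same')
#     d = np.convolve(np.nan_to_num(v), k, mode='same')
#     # (d / m)[1] == 2.0  -> the gap is filled with the local mean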
def locate_reference_file(params, recipe, infile):
# set function name
func_name = display_func(params, 'locate_reference_file', __NAME__)
# get pp file name
# TODO: fix how we get pp file
pp_filename = infile.filename.split('_pp')[0] + '_pp.fits'
# get pseudo const
pconst = constants.pload(params['INSTRUMENT'])
# get reference fiber
_, reffiber = pconst.FIBER_KINDS()
# deal with infile being telluric file (we do not have reference file
# for telluric files) --> must use the telluric files "intype file"
if infile.name == 'TELLU_OBJ':
instance = infile.intype
else:
instance = infile
# get pp file
ppfile = instance.intype.newcopy(recipe=recipe)
ppfile.set_filename(pp_filename)
# check that ppfile is a ppfile
if ppfile.suffix != '_pp':
# log that we could not locate reference file for file
eargs = [infile.name, ppfile.name, infile.filename, func_name]
WLOG(params, 'error', TextEntry('00-020-00003', args=eargs))
# make a new copy of this instance
outfile = instance.newcopy(recipe=recipe, fiber=reffiber)
# construct filename
outfile.construct_filename(params, infile=ppfile)
# read outfile
outfile.read_file()
# return outfile
return outfile
# =============================================================================
# Define CCF calculation functions
# =============================================================================
def compute_ccf_science(params, recipe, infile, image, blaze, wavemap, bprops,
fiber, **kwargs):
    func_name = __NAME__ + '.compute_ccf_science()'
# get parameters from params/kwargs
noise_sigdet = pcheck(params, 'CCF_NOISE_SIGDET', 'noise_sigdet', kwargs,
func_name)
noise_size = pcheck(params, 'CCF_NOISE_BOXSIZE', 'noise_size', kwargs,
func_name)
noise_thres = pcheck(params, 'CCF_NOISE_THRES', 'noise_thres', kwargs,
func_name)
# TODO: Remove this (not used any more)
mask_min = pcheck(params, 'CCF_MASK_MIN_WEIGHT', 'mask_min', kwargs,
func_name)
mask_width = pcheck(params, 'CCF_MASK_WIDTH', 'mask_width', kwargs,
func_name)
mask_units = pcheck(params, 'CCF_MASK_UNITS', 'mask_units', kwargs,
func_name)
fit_type = pcheck(params, 'CCF_FIT_TYPE', 'fit_type', kwargs, func_name)
ccfnmax = pcheck(params, 'CCF_N_ORD_MAX', 'ccfnmax', kwargs,
func_name)
image_pixel_size = pcheck(params, 'IMAGE_PIXEL_SIZE', 'image_pixel_size',
kwargs, func_name)
null_targetrv = pcheck(params, 'CCF_OBJRV_NULL_VAL', 'null_targetrv',
kwargs, func_name)
maxwsr = pcheck(params, 'CCF_MAX_CCF_WID_STEP_RATIO', 'maxwsr', kwargs,
func_name)
# get image size
nbo, nbpix = image.shape
# get parameters from inputs
ccfstep = params['INPUTS']['STEP']
ccfwidth = params['INPUTS']['WIDTH']
targetrv = params['INPUTS']['RV']
# ----------------------------------------------------------------------
# TODO: eventually this should come from object database (so that each
# TODO: object has a constant target rv
# need to deal with no target rv step
if np.isnan(targetrv):
targetrv = infile.get_key('KW_INPUTRV', required=False, dtype=float)
# set target rv to zero if we don't have a value
if targetrv is None:
wargs = [params['KW_INPUTRV'][0], infile.filename]
WLOG(params, 'warning', TextEntry('09-020-00006', args=wargs))
targetrv = 0.0
elif np.abs(targetrv) > null_targetrv:
wargs = [params['KW_INPUTRV'][0], null_targetrv, infile.filename]
WLOG(params, 'warning', TextEntry('09-020-00007', args=wargs))
targetrv = 0.0
# ----------------------------------------------------------------------
# need to deal with mask coming from inputs
if isinstance(params['INPUTS']['MASK'], list):
ccfmask = params['INPUTS']['MASK'][0][0]
# else mask has come from constants
else:
ccfmask = params['INPUTS']['MASK']
# get the berv
berv = bprops['USE_BERV']
# ----------------------------------------------------------------------
# Need some sanity checking on width and step
# ----------------------------------------------------------------------
if ccfstep > (ccfwidth / maxwsr):
eargs = [ccfwidth, ccfstep, maxwsr, func_name]
WLOG(params, 'error', TextEntry('09-020-00005', args=eargs))
# ----------------------------------------------------------------------
# Check we are using correct fiber
# ----------------------------------------------------------------------
pconst = constants.pload(params['INSTRUMENT'])
sfiber, rfiber = pconst.FIBER_CCF()
if fiber != sfiber:
# log that the science fiber was not correct
eargs = [fiber, sfiber, infile.name, infile.filename]
WLOG(params, 'error', TextEntry('09-020-00001', args=eargs))
# ----------------------------------------------------------------------
# Compute photon noise uncertainty for reference file
# ----------------------------------------------------------------------
# set up the arguments for DeltaVrms2D
dkwargs = dict(spe=image, wave=wavemap, sigdet=noise_sigdet,
size=noise_size, threshold=noise_thres)
# run DeltaVrms2D
dvrmsref, wmeanref, wmeanrefo = delta_v_rms_2d(**dkwargs)
# log the estimated RV uncertainty
wargs = [fiber, wmeanref]
WLOG(params, 'info', TextEntry('40-020-00003', args=wargs))
# ----------------------------------------------------------------------
# Do the CCF calculations
# ----------------------------------------------------------------------
# get the mask parameters
mkwargs = dict(filename=ccfmask, mask_width=mask_width,
mask_units=mask_units)
ll_mask_d, ll_mask_ctr, w_mask = get_ccf_mask(params, **mkwargs)
# calculate the CCF
props = ccf_calculation(params, image, blaze, wavemap, berv, targetrv,
ccfwidth, ccfstep, ll_mask_ctr, w_mask,
fit_type, fiber)
# ----------------------------------------------------------------------
# Reference plots
# ----------------------------------------------------------------------
# the image vs wavelength for an order
recipe.plot('CCF_SWAVE_REF', wavemap=wavemap, image=image, fiber=fiber,
nbo=nbo)
# the photon noise uncertainty plot
recipe.plot('CCF_PHOTON_UNCERT', x=np.arange(nbo), y_sp=wmeanrefo,
y_cc=props['CCF_NOISE'])
# as a summary plot
recipe.plot('SUM_CCF_PHOTON_UNCERT', x=np.arange(nbo), y_sp=wmeanrefo,
y_cc=props['CCF_NOISE'])
# ----------------------------------------------------------------------
# Calculate the mean CCF
# ----------------------------------------------------------------------
# get the average ccf
mean_ccf = mp.nanmean(props['CCF'][: ccfnmax], axis=0)
# get the fit for the normalized average ccf
mean_ccf_coeffs, mean_ccf_fit = fit_ccf(params, 'mean', props['RV_CCF'],
mean_ccf, fit_type=fit_type)
# get the max cpp
# TODO: How do we calculate max_cpp and what is it? Do we need it?
# max_cpp = mp.nansum(props['CCF_MAX']) / mp.nansum(props['PIX_PASSED_ALL'])
# get the RV value from the normalised average ccf fit center location
ccf_rv = float(mean_ccf_coeffs[1])
# get the contrast (ccf fit amplitude)
ccf_contrast = np.abs(100 * mean_ccf_coeffs[0])
# get the FWHM value
ccf_fwhm = mean_ccf_coeffs[2] * mp.fwhm()
# ----------------------------------------------------------------------
# combined CCF_NOISE uncertainty
rv_noise = 1.0 / np.sqrt(mp.nansum(1.0 / props['CCF_NOISE'] ** 2))
# ----------------------------------------------------------------------
# log the stats
wargs = [ccf_contrast, float(mean_ccf_coeffs[1]), rv_noise, ccf_fwhm]
WLOG(params, 'info', TextEntry('40-020-00004', args=wargs))
# ----------------------------------------------------------------------
# add to output array
props['TOT_SPEC_RMS'] = wmeanref
props['ORD_SPEC_RMS'] = wmeanrefo
props['MEAN_CCF'] = mean_ccf
props['MEAN_RV'] = ccf_rv
props['MEAN_CONTRAST'] = ccf_contrast
props['MEAN_FWHM'] = ccf_fwhm
props['MEAN_CCF_RES'] = mean_ccf_coeffs
props['MEAN_CCF_FIT'] = mean_ccf_fit
props['MEAN_RV_NOISE'] = rv_noise
# set the source
keys = ['TOT_SPEC_RMS', 'ORD_SPEC_RMS', 'MEAN_CCF', 'MEAN_RV',
'MEAN_CONTRAST', 'MEAN_FWHM', 'MEAN_CCF_RES', 'MEAN_CCF_FIT',
'MEAN_RV_NOISE']
props.set_sources(keys, func_name)
# add constants to props
props['CCF_MASK'] = ccfmask
props['CCF_STEP'] = ccfstep
props['CCF_WIDTH'] = ccfwidth
props['TARGET_RV'] = targetrv
props['CCF_SIGDET'] = noise_sigdet
props['CCF_BOXSIZE'] = noise_size
props['CCF_MAXFLUX'] = noise_thres
props['CCF_NMAX'] = ccfnmax
props['MASK_MIN'] = mask_min
props['MASK_WIDTH'] = mask_width
props['MASK_UNITS'] = mask_units
# set source
keys = ['CCF_MASK', 'CCF_STEP', 'CCF_WIDTH', 'TARGET_RV', 'CCF_SIGDET',
'CCF_BOXSIZE', 'CCF_MAXFLUX', 'CCF_NMAX', 'MASK_MIN', 'MASK_WIDTH',
'MASK_UNITS']
props.set_sources(keys, func_name)
# ------------------------------------------------------------------
# rv ccf plot
# ------------------------------------------------------------------
# loop around every order
recipe.plot('CCF_RV_FIT_LOOP', params=params, x=props['RV_CCF'],
y=props['CCF'], yfit=props['CCF_FIT'], kind='SCIENCE',
rv=props['CCF_FIT_COEFFS'][:, 1], ccfmask=ccfmask,
orders=np.arange(len(props['CCF'])), order=None)
# the mean ccf
recipe.plot('CCF_RV_FIT', params=params, x=props['RV_CCF'],
y=mean_ccf, yfit=mean_ccf_fit, kind='MEAN SCIENCE',
rv=ccf_rv, ccfmask=ccfmask,
orders=None, order=None)
# the mean ccf for summary
recipe.plot('SUM_CCF_RV_FIT', params=params, x=props['RV_CCF'],
y=mean_ccf, yfit=mean_ccf_fit, kind='MEAN SCIENCE',
rv=ccf_rv, ccfmask=ccfmask,
orders=None, order=None)
# ------------------------------------------------------------------
# return property dictionary
return props
def compute_ccf_fp(params, recipe, infile, image, blaze, wavemap, fiber,
sum_plot=True, **kwargs):
func_name = __NAME__ + '.compute_ccf_fp()'
# get constants from params/kwargs
noise_sigdet = pcheck(params, 'WAVE_CCF_NOISE_SIGDET', 'sigdet', kwargs,
func_name)
noise_size = pcheck(params, 'WAVE_CCF_NOISE_BOXSIZE', 'boxsize', kwargs,
func_name)
noise_thres = pcheck(params, 'WAVE_CCF_NOISE_THRES', 'maxflux', kwargs,
func_name)
ccfstep = pcheck(params, 'WAVE_CCF_STEP', 'ccfstep', kwargs, func_name)
ccfwidth = pcheck(params, 'WAVE_CCF_WIDTH', 'ccfwidth', kwargs, func_name)
targetrv = pcheck(params, 'WAVE_CCF_TARGET_RV', 'targetrv', kwargs,
func_name)
ccfmask = pcheck(params, 'WAVE_CCF_MASK', 'ccfmask', kwargs, func_name)
ccfnmax = pcheck(params, 'WAVE_CCF_N_ORD_MAX', 'ccfnmax', kwargs,
func_name)
mask_min = pcheck(params, 'WAVE_CCF_MASK_MIN_WEIGHT', 'mask_min', kwargs,
func_name)
mask_width = pcheck(params, 'WAVE_CCF_MASK_WIDTH', 'mask_width', kwargs,
func_name)
mask_units = pcheck(params, 'WAVE_CCF_MASK_UNITS', 'mask_units', kwargs,
func_name)
image_pixel_size = pcheck(params, 'IMAGE_PIXEL_SIZE', 'image_pixel_size',
kwargs, func_name)
# set the berv to zero (fp have no berv)
berv = 0
# the fit type must be set to 1 (for emission lines)
fit_type = 1
# ------------------------------------------------------------------
# Compute photon noise uncertainty for FP
# ------------------------------------------------------------------
# set up the arguments for DeltaVrms2D
dkwargs = dict(spe=image, wave=wavemap, sigdet=noise_sigdet,
size=noise_size, threshold=noise_thres)
# run DeltaVrms2D
dvrmsref, wmeanref, wmeanrefo = delta_v_rms_2d(**dkwargs)
# log the estimated RV uncertainty
wargs = [fiber, wmeanref]
WLOG(params, 'info', TextEntry('40-017-00028', args=wargs))
# ----------------------------------------------------------------------
# Do the CCF calculations
# ----------------------------------------------------------------------
# get the mask parameters
mkwargs = dict(filename=ccfmask, mask_width=mask_width,
mask_units=mask_units)
ll_mask_d, ll_mask_ctr, w_mask = get_ccf_mask(params, **mkwargs)
# calculate the CCF
props = ccf_calculation(params, image, blaze, wavemap, berv, targetrv,
ccfwidth, ccfstep, ll_mask_ctr, w_mask, fit_type,
fiber)
# ----------------------------------------------------------------------
# Calculate the mean CCF
# ----------------------------------------------------------------------
# get the average ccf
mean_ccf = mp.nanmean(props['CCF'][: ccfnmax], axis=0)
# get the fit for the normalized average ccf
mean_ccf_coeffs, mean_ccf_fit = fit_ccf(params, 'mean', props['RV_CCF'],
mean_ccf, fit_type=fit_type)
# get the max cpp
# TODO: How do we calculate max_cpp and what is it? Do we need it?
# max_cpp = mp.nansum(props['CCF_MAX']) / mp.nansum(props['PIX_PASSED_ALL'])
# get the RV value from the normalised average ccf fit center location
ccf_rv = float(mean_ccf_coeffs[1])
# get the contrast (ccf fit amplitude)
ccf_contrast = np.abs(100 * mean_ccf_coeffs[0])
# get the FWHM value
ccf_fwhm = mean_ccf_coeffs[2] * mp.fwhm()
# ----------------------------------------------------------------------
# combined CCF_NOISE uncertainty
rv_noise = 1.0 / np.sqrt(mp.nansum(1.0 / props['CCF_NOISE'] ** 2))
# ----------------------------------------------------------------------
# log the stats
wargs = [ccf_contrast, float(mean_ccf_coeffs[1]), rv_noise, ccf_fwhm]
WLOG(params, 'info', TextEntry('40-020-00004', args=wargs))
# ----------------------------------------------------------------------
# add to output array
props['TOT_SPEC_RMS'] = wmeanref
props['ORD_SPEC_RMS'] = wmeanrefo
props['MEAN_CCF'] = mean_ccf
props['MEAN_RV'] = ccf_rv
props['MEAN_CONTRAST'] = ccf_contrast
props['MEAN_FWHM'] = ccf_fwhm
props['MEAN_CCF_COEFFS'] = mean_ccf_coeffs
props['MEAN_CCF_FIT'] = mean_ccf_fit
props['MEAN_RV_NOISE'] = rv_noise
# set the source
keys = ['TOT_SPEC_RMS', 'ORD_SPEC_RMS', 'MEAN_CCF', 'MEAN_RV',
'MEAN_CONTRAST', 'MEAN_FWHM', 'MEAN_CCF_COEFFS', 'MEAN_CCF_FIT',
'MEAN_RV_NOISE']
props.set_sources(keys, func_name)
# add constants to props
props['CCF_MASK'] = ccfmask
props['CCF_STEP'] = ccfstep
props['CCF_WIDTH'] = ccfwidth
props['TARGET_RV'] = targetrv
props['CCF_SIGDET'] = noise_sigdet
props['CCF_BOXSIZE'] = noise_size
props['CCF_MAXFLUX'] = noise_thres
props['CCF_NMAX'] = ccfnmax
props['MASK_MIN'] = mask_min
props['MASK_WIDTH'] = mask_width
props['MASK_UNITS'] = mask_units
# set source
keys = ['CCF_MASK', 'CCF_STEP', 'CCF_WIDTH', 'TARGET_RV', 'CCF_SIGDET',
'CCF_BOXSIZE', 'CCF_MAXFLUX', 'CCF_NMAX', 'MASK_MIN', 'MASK_WIDTH',
'MASK_UNITS']
props.set_sources(keys, func_name)
# ----------------------------------------------------------------------
# rv ccf plot
# ----------------------------------------------------------------------
# loop around every order
recipe.plot('CCF_RV_FIT_LOOP', params=params, x=props['RV_CCF'],
y=props['CCF'], yfit=props['CCF_FIT'],
kind='FP fiber={0}'.format(fiber),
rv=props['CCF_FIT_COEFFS'][:, 1], ccfmask=ccfmask,
orders=np.arange(len(props['CCF'])), order=None)
# the mean ccf
recipe.plot('CCF_RV_FIT', params=params, x=props['RV_CCF'],
y=mean_ccf, yfit=mean_ccf_fit,
kind='MEAN FP fiber={0}'.format(fiber),
rv=props['MEAN_CCF_COEFFS'][1], ccfmask=ccfmask,
orders=None, order=None)
# the mean ccf for summary
if sum_plot:
recipe.plot('SUM_CCF_RV_FIT', params=params, x=props['RV_CCF'],
y=mean_ccf, yfit=mean_ccf_fit,
kind='MEAN FP fiber={0}'.format(fiber),
rv=ccf_rv, ccfmask=ccfmask,
orders=None, order=None)
# TODO : Add QC of the FP CCF once they are defined
# return the rv props
return props
def ccf_calculation(params, image, blaze, wavemap, berv, targetrv, ccfwidth,
ccfstep, mask_centers, mask_weights, fit_type, fiber,
**kwargs):
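    """
    Compute the per-order CCF of 'image' against the line list given by
    'mask_centers' and 'mask_weights', over velocities spanning
    targetrv +/- ccfwidth in steps of ccfstep (relative to the
    BERV-corrected frame), and fit each order's CCF

    :return props: ParamDict containing (at least) RV_CCF, CCF, CCF_LINES,
                   TOT_LINE, CCF_NOISE [m/s], CCF_SNR, CCF_FIT and
                   CCF_FIT_COEFFS
    """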
# set function name
    func_name = display_func(params, 'ccf_calculation', __NAME__)
# get properties from params
blaze_norm_percentile = pcheck(params, 'CCF_BLAZE_NORM_PERCENTILE',
'blaze_norm_percentile', kwargs, func_name)
blaze_threshold = pcheck(params, 'WAVE_FP_BLAZE_THRES', 'blaze_threshold',
kwargs, func_name)
# get rvmin and rvmax
rvmin = targetrv - ccfwidth
rvmin = pcheck(params, 'RVMIN', 'rvmin', kwargs, func_name, default=rvmin)
rvmax = targetrv + ccfwidth + ccfstep
rvmax = pcheck(params, 'RVMAX', 'rvmax', kwargs, func_name, default=rvmax)
# get the dimensions
nbo, nbpix = image.shape
# create a rv ccf range
rv_ccf = np.arange(rvmin, rvmax, ccfstep)
# storage of the ccf
ccf_all = []
ccf_noise_all = []
ccf_all_fit = []
ccf_all_results = []
ccf_lines = []
ccf_all_snr = []
ccf_norm_all = []
# ----------------------------------------------------------------------
# loop around the orders
for order_num in range(nbo):
# log the process
WLOG(params, '', TextEntry('40-020-00005', args=[fiber, order_num]))
# ------------------------------------------------------------------
# get this orders values
wa_ord = np.array(wavemap[order_num])
sp_ord = np.array(image[order_num])
bl_ord = np.array(blaze[order_num])
# we express sp_ord as a flux in photons per km/s
grad = speed_of_light * np.gradient(wa_ord)/wa_ord
sp_ord = sp_ord / grad
        # normalize the per-order blaze to its peak value
# this gets rid of the calibration lamp SED
bl_ord /= np.nanpercentile(bl_ord, blaze_norm_percentile)
# change NaNs in blaze to zeros
bl_ord[~np.isfinite(bl_ord)] = 0.0
# mask on the blaze
with warnings.catch_warnings(record=True) as _:
blazemask = bl_ord > blaze_threshold
# get order mask centers and mask weights
min_ord_wav = mp.nanmin(wa_ord[blazemask])
max_ord_wav = mp.nanmax(wa_ord[blazemask])
# adjust for rv shifts
# min_ord_wav = min_ord_wav * (1 - rvmin / speed_of_light)
# max_ord_wav = max_ord_wav * (1 - rvmax / speed_of_light)
# mask the ccf mask by the order length
mask_wave_mask = (mask_centers > min_ord_wav)
mask_wave_mask &= (mask_centers < max_ord_wav)
omask_centers = mask_centers[mask_wave_mask]
omask_weights = mask_weights[mask_wave_mask]
# ------------------------------------------------------------------
# find any places in spectrum or blaze where pixel is NaN
nanmask = np.isnan(sp_ord) | np.isnan(bl_ord)
# ------------------------------------------------------------------
# deal with no valid lines
if np.sum(mask_wave_mask) == 0:
# log all NaN
wargs = [order_num, min_ord_wav, max_ord_wav]
WLOG(params, 'warning', TextEntry('10-020-00006', args=wargs))
# set all values to NaN
ccf_all.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_fit.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_results.append(np.repeat(np.nan, 4))
ccf_noise_all.append(np.nan)
ccf_lines.append(0)
ccf_all_snr.append(np.nan)
ccf_norm_all.append(np.nan)
continue
# ------------------------------------------------------------------
# deal with all nan
if np.sum(nanmask) == nbpix:
# log all NaN
wargs = [order_num]
WLOG(params, 'warning', TextEntry('10-020-00004', args=wargs))
# set all values to NaN
ccf_all.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_fit.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_results.append(np.repeat(np.nan, 4))
ccf_noise_all.append(np.nan)
ccf_lines.append(0)
ccf_all_snr.append(np.nan)
ccf_norm_all.append(np.nan)
continue
# ------------------------------------------------------------------
# set the spectrum or blaze NaN pixels to zero (dealt with by divide)
sp_ord[nanmask] = 0
bl_ord[nanmask] = 0
# now every value that is zero is masked (we don't want to spline these)
good = (sp_ord != 0) & (bl_ord != 0)
weight_ord = np.array(good, dtype=float)
# ------------------------------------------------------------------
# spline the spectrum and the blaze
spline_sp = mp.iuv_spline(wa_ord[good], sp_ord[good], k=5, ext=1)
spline_bl = mp.iuv_spline(wa_ord[good], bl_ord[good], k=5, ext=1)
spline_weight = mp.iuv_spline(wa_ord, weight_ord, k=1, ext=1)
# ------------------------------------------------------------------
# set up the ccf for this order
ccf_ord = np.zeros_like(rv_ccf)
# ------------------------------------------------------------------
# get the wavelength shift (dv) in relativistic way
wave_shifts = mp.relativistic_waveshift(rv_ccf - berv)
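        # (wave_shifts are multiplicative Doppler factors; for the
        #  relativistic case lambda' = lambda * sqrt((1 + v/c) / (1 - v/c)),
        #  which mp.relativistic_waveshift is assumed to implement)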
# ------------------------------------------------------------------
# propagating the extreme wave shifts to see if any lines fall off
# the domain that is considered valid for the spline
# find the wave grid for the first shift
wave_tmp_start = omask_centers * wave_shifts[0]
# find the wave grid for the last shift
wave_tmp_end = omask_centers * wave_shifts[-1]
# find the valid lines within these limits
# (ext=1 puts 0 when point is beyond domain)
valid_lines_start = spline_bl(wave_tmp_start) != 0
valid_lines_end = spline_bl(wave_tmp_end) != 0
# combine the valid masks for start and end
keep = valid_lines_start & valid_lines_end
# ------------------------------------------------------------------
# deal with no valid lines
if np.sum(keep) == 0:
# log all NaN
wargs = [order_num]
WLOG(params, 'warning', TextEntry('10-020-00007', args=wargs))
# set all values to NaN
ccf_all.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_fit.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_results.append(np.repeat(np.nan, 4))
ccf_noise_all.append(np.nan)
ccf_lines.append(0)
ccf_all_snr.append(np.nan)
ccf_norm_all.append(np.nan)
continue
# ------------------------------------------------------------------
# apply masks to centers and weights
omask_centers = omask_centers[keep]
omask_weights = omask_weights[keep]
        # normalise omask weights by their mean
omask_weights = omask_weights / np.nanmean(omask_weights)
# Number of photons at line centers for 1 CCF step
sweights = spline_weight(omask_centers)
nphot = spline_sp(omask_centers) * sweights / ccfstep
# Poisson noise is a bit bigger because of weights
wsum = np.sum(nphot*omask_weights)
wsum2 = np.sum(nphot*omask_weights**2)
# we can't calculate wnoise for negative values --> set to inf
if (wsum <= 0) or (wsum2 <= 0):
wargs = [order_num]
WLOG(params, 'warning', TextEntry('10-020-00008', args=wargs))
wsum, wnoise = 0.0, np.inf
else:
wnoise = np.sqrt(wsum2)
# ------------------------------------------------------------------
# set number of valid lines used to zero
numlines = 0
# loop around the rvs and calculate the CCF at this point
part3 = spline_bl(omask_centers)
for rv_element in range(len(rv_ccf)):
wave_tmp = omask_centers * wave_shifts[rv_element]
part1 = spline_sp(wave_tmp)
part2 = spline_bl(wave_tmp)
part4 = spline_weight(wave_tmp)
            # count lines whose shifted position still falls in the valid
            # spline domain (part2 is spline_bl(wave_tmp), computed above)
            numlines = np.sum(part2 != 0)
# CCF is the division of the sums
with warnings.catch_warnings(record=True) as _:
ccf_element = ((part1 * part3) / part2) * omask_weights * part4
ccf_ord[rv_element] = mp.nansum(ccf_element)
# ------------------------------------------------------------------
# deal with NaNs in ccf
if np.sum(np.isnan(ccf_ord)) > 0:
# log all NaN
wargs = [order_num]
WLOG(params, 'warning', TextEntry('10-020-00005', args=wargs))
# set all values to NaN
ccf_all.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_fit.append(np.repeat(np.nan, len(rv_ccf)))
ccf_all_results.append(np.repeat(np.nan, 4))
ccf_noise_all.append(np.nan)
ccf_lines.append(0)
ccf_all_snr.append(np.nan)
ccf_norm_all.append(np.nan)
continue
# ------------------------------------------------------------------
# TODO -- check that its ok to remove the normalization
# TODO -- this should preserve the stellar flux weighting
# normalise each orders CCF to median
# TODO -- keep track of the norm factor, write a look-up table
# TODO -- with reasonable mid-M values and use these values for
# TODO -- all stars. At some point, have a temperature-dependent
# TODO -- LUT of weights.
ccf_norm = mp.nanmedian(ccf_ord)
# ccf_ord = ccf_ord / ccf_norm
# ------------------------------------------------------------------
# fit the CCF with a gaussian
fargs = [order_num, rv_ccf, ccf_ord, fit_type]
ccf_coeffs_ord, ccf_fit_ord = fit_ccf(params, *fargs)
# ------------------------------------------------------------------
# get the RV accuracy from Bouchy 2001 equation
dv_pix = (np.gradient(ccf_ord)/np.gradient(rv_ccf))/wnoise
# set the bad values for ccf noise and ccf snr --> NaN value is bad
if wsum == 0:
ccf_noise = np.nan
ccf_snr = np.nan
else:
ccf_noise = 1 / np.sqrt(np.nansum(dv_pix ** 2))
            # get the snr
ccf_snr = wsum / wnoise
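            # (Bouchy et al. 2001: the RV uncertainty is
            #  1 / sqrt(sum_i (dCCF/dv)_i**2 / sigma_i**2), with the photon
            #  noise term carried by wnoise above)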
# ------------------------------------------------------------------
# append ccf to storage
ccf_all.append(ccf_ord)
ccf_all_fit.append(ccf_fit_ord)
ccf_all_results.append(ccf_coeffs_ord)
ccf_noise_all.append(ccf_noise)
ccf_lines.append(numlines)
ccf_all_snr.append(ccf_snr)
ccf_norm_all.append(ccf_norm)
# store outputs in param dict
props = ParamDict()
props['RV_CCF'] = rv_ccf
props['CCF'] = np.array(ccf_all)
props['CCF_LINES'] = np.array(ccf_lines)
props['TOT_LINE'] = np.sum(ccf_lines)
props['CCF_NOISE'] = np.array(ccf_noise_all) * 1000 # [m/s]
props['CCF_SNR'] = np.array(ccf_all_snr)
props['CCF_FIT'] = np.array(ccf_all_fit)
props['CCF_FIT_COEFFS'] = | np.array(ccf_all_results) | numpy.array |
import unittest
import numpy as np
import time
import torch
from potts_deepflow import Potts_MAP1d,Potts_MAP2d,Potts_MAP3d
from potts_deepflow import Potts_Mean1d,Potts_Mean2d,Potts_Mean3d
b=1
c=3
x=2**12
epsilon = 0.01
def get_size_into(d):
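    """Return (full shape, reduced shape, spatial axes) for a d-dimensional
    test tensor; the spatial extent per dimension is x**(1/d), keeping the
    total number of voxels constant (= x) for every dimensionality."""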
x_used = int(x**(1/d)+0.001)
return tuple( [b,c]+[x_used for i in range(d)] ), tuple( [1,c]+[x_used for i in range(d)] ), tuple([i+2 for i in range(d)])
def test_no_smoothness(d,device,asserter):
print("Testing (no smoothness) \t Dim: " +str(d)+ " \t Dev: " + device)
size_info, size_red_info, axes = get_size_into(d)
data_t = np.random.normal(0,1,size=size_info).astype(np.float32)
data_w = np.random.normal(0,1,size=size_info).astype(np.float32)
data_rx = np.zeros(shape=size_info).astype(np.float32)
if d > 1:
data_ry = np.zeros(shape=size_info).astype(np.float32)
if d > 2:
data_rz = np.zeros(shape=size_info).astype(np.float32)
t = torch.tensor(data_t, device=torch.device(device))
t.requires_grad = True
w = torch.tensor(data_w, device=torch.device(device))
rx = torch.tensor(data_rx, device=torch.device(device))
rx.requires_grad = True
if d > 1:
ry = torch.tensor(data_ry, device=torch.device(device))
ry.requires_grad = True
if d > 2:
rz = torch.tensor(data_rz, device=torch.device(device))
rz.requires_grad = True
if d == 1:
oa = torch.exp(Potts_MAP1d.apply(t,rx))
om = Potts_Mean1d.apply(t,rx)
elif d == 2:
oa = torch.exp(Potts_MAP2d.apply(t,rx,ry))
om = Potts_Mean2d.apply(t,rx,ry)
elif d == 3:
oa = torch.exp(Potts_MAP3d.apply(t,rx,ry,rz))
om = Potts_Mean3d.apply(t,rx,ry,rz)
loss = torch.sum(w*om)
loss.backward()
oa_np = oa.detach().cpu().numpy()
om_np = om.detach().cpu().numpy()
ot_np = t.grad.detach().cpu().numpy()
#make sure not nan
asserter.assertFalse(np.any(np.isnan(oa_np)))
asserter.assertFalse(np.any(np.isnan(om_np)))
asserter.assertFalse(np.any(np.isnan(ot_np)))
#resize into more usable form
dt_np_l = [data_t[0,i,...].flatten() for i in range(c)]
dw_np_l = [data_w[0,i,...].flatten() for i in range(c)]
oa_np_l = [oa_np[0,i,...].flatten() for i in range(c)]
om_np_l = [om_np[0,i,...].flatten() for i in range(c)]
ot_np_l = [ot_np[0,i,...].flatten() for i in range(c)]
x_space = len(dt_np_l[0])
#ensure MAP assigns 1 to highest term and 0 to everything else
for i in range(x_space):
highest = max([o[i] for o in dt_np_l])
for ic in range(c):
if(dt_np_l[ic][i] == highest and oa_np_l[ic][i] < 0.5):
raise Exception(str(dt_np_l[ic][i])+"\t"+str([o[i] for o in dt_np_l])+"\t"+str(highest)+"\t"+str(oa_np_l[ic][i])+"\t"+str([o[i] for o in oa_np_l]))
if(dt_np_l[ic][i] < highest - epsilon and oa_np_l[ic][i] > 0.5):
raise Exception(str(dt_np_l[ic][i])+"\t"+str([o[i] for o in dt_np_l])+"\t"+str(highest)+"\t"+str(oa_np_l[ic][i])+"\t"+str([o[i] for o in oa_np_l]))
#ensure mean pass is equivalent to the data terms only
for i in range(c):
for val_df, val_d in zip(om_np_l[i],dt_np_l[i]):
if(abs(val_df-val_d) > epsilon):
raise Exception(str(val_df) + "\t" + str(val_d))
#ensure gradient wrt data terms are passed immediately through
for i in range(c):
for val_df, val_d in zip(ot_np_l[i],dw_np_l[i]):
if(abs(val_df-val_d) > epsilon):
raise Exception(str(val_df) + "\t" + str(val_d))
def test_smoothness_dom(d,device,asserter):
print("Testing (smoothness dom.) \t Dim: " +str(d)+ " \t Dev: " + device)
size_info, size_red_info, axes = get_size_into(d)
winner = int(np.random.uniform()*c)
data_t = 1*np.random.uniform(0,1,size=size_info).astype(np.float32)
data_t[:,winner,...] = 0.75
data_r = 100+0*np.random.uniform(size=size_info).astype(np.float32)
t = torch.tensor(data_t, device=torch.device(device))
r = torch.tensor(data_r, device=torch.device(device))
if d == 1:
oa = torch.exp(Potts_MAP1d.apply(t,r))
om = Potts_Mean1d.apply(t,r)
elif d == 2:
oa = torch.exp(Potts_MAP2d.apply(t,r,r))
om = Potts_Mean2d.apply(t,r,r)
elif d == 3:
oa = torch.exp(Potts_MAP3d.apply(t,r,r,r))
om = Potts_Mean3d.apply(t,r,r,r)
oa_np = oa.detach().cpu().numpy()
om_np = om.detach().cpu().numpy()
#make sure not nan
asserter.assertFalse(np.any( | np.isnan(oa_np) | numpy.isnan |
#! /usr/bin/env python
"""
WFIRST Infrared Nearby Galaxies Test Image Product Simulator
Produces input files for the WFIRST STIPS simulator
"""
import time
import subprocess
import resource
import gc
import numpy as np
from astropy import wcs
from astropy.io import fits, ascii
from astropy.table import Table
import dask.dataframe as dd
import wpipe as wp
try:
this_job = wp.ThisJob
specialprint = this_job.logprint
except AttributeError:
specialprint = print
class WingTips:
"""
Initialize WingTips object
"""
def __init__(self, infile=[], center=[0, 0], **kwargs):
gc.collect()
specialprint("Starting WingTips __init__ %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
if len(infile) == 0:
self.tab = np.array([])
else:
if isinstance(infile, str):
infile = [infile]
specialprint("Attempting read stips...")
self.tab = WingTips.read_stips(infile[0], **kwargs)
specialprint("After read_stips %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
if len(infile) > 1:
for i in range(1, len(infile)):
_tab = WingTips.read_stips(infile[i], **kwargs)
self.tab = np.vstack((self.tab, _tab))
center = WingTips.get_center(self.tab[:, 0], self.tab[:, 1])
self.center = center
self.n = self.tab.shape[0]
self.infile = infile
gc.collect()
''' Strip coordinates from WingTips object '''
def strip_radec(self, hasID=False):
_i = int(hasID)
self.tab = np.delete(self.tab, [_i, _i + 1], 1)
return None
''' Attach given RA-DEC to WingTips object'''
def attach_radec(self, radec, hasID=False):
if self.n != radec.shape[0]:
raise ValueError('Number of RA-DEC does not match sources')
_i = int(hasID)
self.tab = np.insert(self.tab, _i, radec.T, 1)
self.center = WingTips.get_center(radec[:, 0 + _i], radec[:, 1 + _i])
return None
''' Replace RA-DEC of WingTips object '''
def replace_radec(self, radec, hasID=False):
self.strip_radec(hasID)
self.attach_radec(radec, hasID)
return None
'''
Return random RA-DEC for given image or WingTips object
Optionally, specify center and image size desired
'''
def random_radec_for(self, other, shape=(4096, 4096), sample=False, n=0, hasID=False):
_i = int(hasID)
# try:
# if other.endswith('.fits'):
# return WingTips.random_radec(self.n,imfile=other)
# except AttributeError:
if not sample:
return WingTips.random_radec(self.n, center=other.center, shape=shape)
elif not bool(n):
return WingTips.sample_radec(n=self.n, radec1=False, radec2=other.tab[:, _i:_i + 1])
else:
return WingTips.sample_radec(n=n, radec1=self.tab[:, _i:_i + 1], radec2=other.tab[:, _i:_i + 1])
''' Merge two WingTips objects '''
def merge_with(self, other, hasRADEC=True, hasID=False):
if self.tab.shape[1] != other.tab.shape[1]:
raise ValueError('Number of columns does not match', self.tab.shape[1], other.tab.shape[1])
self.tab = np.vstack((self.tab, other.tab))
self.n = self.tab.shape[0]
self.infile.append(other.infile)
_i = int(hasID)
if hasRADEC:
self.center = WingTips.get_center(self.tab[:, 0 + _i], self.tab[:, 1 + _i])
return None
''' Convert flux to surface brightness for sersic profile galaxies '''
def flux_to_Sb(self, hasRADEC=True, hasID=False):
_i = int(hasID)
if hasRADEC:
_i = _i + 2
_f = self.tab[:, _i].astype(float)
_r = self.tab[:, _i + 3].astype(float)
_a = self.tab[:, _i + 5].astype(float)
_s = (0.5 * _f) / (np.pi * _r ** 2 * _a)
self.tab = np.delete(self.tab, _i, 1)
self.tab = np.insert(self.tab, _i, _s.T, 1)
return None
def write_stips(self, outfile='temp.txt', hasID=False, hasCmnt=False, saveID=False, ipac=False,
max_writing_packet=np.inf):
"""
Write out a STIPS input file
"""
gc.collect()
_tab = WingTips.get_tabular(self.tab, hasID, hasCmnt, saveID)
specialprint("After get_tabular %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
_nms = ('id', 'ra', 'dec', 'flux', 'type', 'n', 're', 'phi', 'ratio', 'notes')
_fmt = ('%10d', '%15.7f', '%15.7f', '%15.7f', '%8s', '%10.3f', '%15.7f', '%15.7f', '%15.7f', '%8s')
_t = Table(_tab, names=_nms)
del _tab
gc.collect()
specialprint("After astropy.Table %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
_length = len(_t)
_max = min(max_writing_packet, _length)
outfile = open(outfile, 'w')
for i in range(int(np.ceil(_length / _max))):
ascii.write(_t[i * _max:(i + 1) * _max], outfile,
format=['fixed_width_no_header', ['fixed_width', 'ipac'][ipac]][i == 0],
formats=dict(zip(_nms, _fmt)),
**[{'delimiter': ' ', 'delimiter_pad': ''}, {}][(ipac and i == 0)])
outfile.close()
del _t
gc.collect()
specialprint("After ascii.write %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
return specialprint('Wrote out %s \n' % outfile)
def append_stips(self, outfile='temp.txt', hasID=False, hasCmnt=False, saveID=False, startID=1, ipac=False,
max_writing_packet=np.inf):
"""
Append a STIPS input file
"""
gc.collect()
_tab = WingTips.get_tabular(self.tab, hasID, hasCmnt, saveID, startID)
specialprint("After get_tabular %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
        # dummy column names that look like a data row: ascii.write always
        # emits a header line, and when appending this spurious line is
        # removed again with sed below
        # (real columns: id, ra, dec, flux, type, n, re, phi, ratio, notes)
        _nms = ('0', '0.0', '0.00', '0.000', 'point', '1.000', '1.0000', '1.00000', '1.0000000', 'comment')
_fmt = ('%10d', '%15.7f', '%15.7f', '%15.7f', '%8s', '%10.3f', '%15.7f', '%15.7f', '%15.7f', '%8s')
_t = Table(_tab, names=_nms)
del _tab
gc.collect()
specialprint("After astropy.Table in append %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
_length = len(_t)
_max = min(max_writing_packet, _length)
outname = str(outfile)
outfile = open(outfile, 'a')
for i in range(int(np.ceil(_length / _max))):
ascii.write(_t[i * _max:(i + 1) * _max], outfile,
format=['fixed_width_no_header', ['fixed_width', 'ipac'][ipac]][i == 0],
formats=dict(zip(_nms, _fmt)),
**[{'delimiter': ' ', 'delimiter_pad': ''}, {}][(ipac and i == 0)])
outfile.close()
del _t
gc.collect()
specialprint("After ascii.write in append %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
rmline = str(startID+1)
specialprint(rmline)
command = "sed -i \'"+rmline+"d\' "+outname
_p=subprocess.run(command,shell=True)
return specialprint('Appended %s \n' % outfile)
@staticmethod
def from_scratch(flux, ra=[], dec=[], center=[], ID=[], Type=[], n=[], re=[], phi=[], ratio=[], notes=[],
outfile=None, max_writing_packet=np.inf):
"""
Build a WingTips class object from scratch
"""
gc.collect()
specialprint("Starting memory %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
_temp = WingTips()
_temp.n = len(flux)
_temp.infile = ['fromScratch']
#
if len(center) > 0:
_temp.center = center
if len(ra) == 0:
radec = _temp.random_radec_for(_temp)
ra, dec = radec[:, 0], radec[:, 1]
elif (len(ra) == len(dec)) & (len(ra) > 0):
_temp.center = WingTips.get_center(np.array(ra), np.array(dec))
else:
raise ValueError('Provide valid coordinate or center')
#
        if (len(Type) == 0) or (isinstance(Type, str) and Type in ('point', 'sersic')):
            if (len(Type) == 0) or (Type == 'point'):
                Type = np.repeat(np.array(['point']), len(flux))
                _ones = np.ones_like(flux)
                n, re, phi, ratio = _ones, _ones, _ones, _ones
            else:  # Type == 'sersic'
                Type = np.repeat(np.array(['sersic']), len(flux))
elif len(Type) == _temp.n:
Type = np.array(Type)
specialprint("After defining Type %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
        check = len(flux)
        # flag whether optional ID and comment columns were supplied
        has_id = len(ID) == _temp.n
        has_cmnt = len(notes) == _temp.n
        if outfile is None:
            _tab = np.array([ra, dec, flux, Type, n, re, phi, ratio], dtype='object').T
            if has_id:
                _tab = np.hstack((np.array(ID, ndmin=2).T, _tab))
            if has_cmnt:
                _tab = np.hstack((_tab, np.array(notes, ndmin=2).T))
            _temp.tab = _tab
            return _temp
        elif check < 10000000:
            _tab = np.array([ra, dec, flux, Type, n, re, phi, ratio], dtype='object').T
            if has_id:
                _tab = np.hstack((np.array(ID, ndmin=2).T, _tab))
            if has_cmnt:
                _tab = np.hstack((_tab, np.array(notes, ndmin=2).T))
            _temp.tab = np.array(_tab)
            del _tab
            gc.collect()
            specialprint("After defining _temp.tab %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
            _temp.write_stips(outfile, hasID=has_id, hasCmnt=has_cmnt, saveID=has_id,
                              max_writing_packet=max_writing_packet)
elif check >= 10000000:
            runs = int(np.floor(check / 10000000) + 1)
_tab = np.array([ra[0:10000000], dec[0:10000000], flux[0:10000000], Type[0:10000000], n[0:10000000], re[0:10000000], phi[0:10000000], ratio[0:10000000]], dtype='object').T
_temp.tab = np.array(_tab)
del _tab
gc.collect()
            # the chunked table only carries the 8 base columns, so let
            # get_tabular regenerate the ID and comment columns
            _temp.write_stips(outfile, hasID=False, hasCmnt=False, saveID=False,
                              max_writing_packet=max_writing_packet)
for i in range(runs-1):
index1 = (i+1)*10000000
index2 = (i+2)*10000000
if index1 > check-1:
continue
                if index2 > check:
                    index2 = check
specialprint("INDEXES %i %i" % (index1,index2))
_tab = np.array([ra[index1:index2], dec[index1:index2], flux[index1:index2], Type[index1:index2], n[index1:index2], re[index1:index2], phi[index1:index2], ratio[index1:index2]], dtype='object').T
#print(ra[index1:index2])
#print(_tab)
_temp.tab = np.array(_tab)
del _tab
gc.collect()
                # chunked path: base columns only (see write_stips call above)
                _temp.append_stips(outfile, hasID=False, hasCmnt=False, saveID=False, startID=index1+1, max_writing_packet=max_writing_packet)
del _temp
gc.collect()
specialprint("OUT OF LOOP")
@staticmethod
def read_stips(infile, getRADEC=True, getID=False, getCmnt=False, **kwargs):
"""
Read in a STIPS input file in ascii format and
return corresponding NumPy array
"""
gc.collect()
include_names = getID * ['id'] + \
getRADEC * ['ra', 'dec'] + \
['flux', 'type', 'n', 're', 'phi', 'ratio'] + \
getCmnt * ['comment']
kwargs['usecols'] = include_names
_temp = dd.read_table(infile, sep='\s+', **kwargs).to_dask_array().compute()
specialprint("After dd.read_table > to_array > compute %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
return _temp
@staticmethod
def get_tabular(_tab, hasID=False, hasCmnt=False, saveID=False, startID=1):
"""
Return tabular lists for STIPS input file columns
"""
_i = int(hasID)
        if not saveID:
_n = _tab.shape[0]
#_ID = np.array(np.linspace(1, _n, _n), ndmin=2).T
_ID = np.array(np.linspace(startID, startID-1+_n, _n), ndmin=2).T
_tab = np.hstack((_ID, _tab[:, _i:]))
del _ID
specialprint("After ~saveID %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
        if not hasCmnt:
_cmnt = np.array(np.repeat(np.array(['comment']), _tab.shape[0], ), ndmin=2).T
_tab = np.hstack((_tab, _cmnt))
del _cmnt
specialprint("After ~hasCmnt %f MB" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
gc.collect()
return [_tab[:, 0].astype(float), _tab[:, 1].astype(float), _tab[:, 2].astype(float),
_tab[:, 3].astype(float), _tab[:, 4], _tab[:, 5].astype(float),
_tab[:, 6].astype(float), _tab[:, 7].astype(float),
_tab[:, 8].astype(float), _tab[:, 9]]
''' Build WCS coordinate system from scratch '''
@staticmethod
def create_wcs(centers=[0, 0], crpix=[2048, 2048], cdelt=[-0.11 / 3600, 0.11 / 3600], cunit=['deg', 'deg'], \
ctype=['RA---TAN', 'DEC--TAN'], lonpole=180, latpole=24.333335, \
equinox=2000.0, radesys='ICRS'):
_w = wcs.WCS()
_w.wcs.cdelt = cdelt
_w.wcs.crpix = crpix
_w.wcs.crval = centers
_w.wcs.cunit = cunit
_w.wcs.ctype = ctype
_w.wcs.lonpole = lonpole
_w.wcs.latpole = latpole
_w.wcs.radesys = radesys
_w.wcs.equinox = equinox
return _w
''' Return coordinate system for given image file'''
@staticmethod
def read_wcs(imfile):
specialprint('Getting coordinates from %s \n' % imfile)
return wcs.WCS(fits.open(imfile)[1].header)
''' Return 'n' random radec for given image file or coordinate list '''
@staticmethod
def random_radec(n=10, center=[0, 0], shape=(4096, 4096), imfile=''):
_xy = | np.random.rand(n, 2) | numpy.random.rand |
import numpy as np
import random
from svg.file import SVGFileV2
from svg.basic import clip_float, draw_path, random_color, random_color_hsv
from svg.geo_transformation import translation_pts_xy, reflection_points
from common import gImageOutputPath
# plot function to svg
# from scipy.special import perm,comb
from itertools import combinations
def funcIdentity(x):
return x # y=x
def funcQuadratic(x):
return x**2
def funcSin(x):
return np.sin(x)
def funcCos(x):
return np.cos(x)
def normalDistribution(x):
return 1 / np.sqrt(2 * np.pi) * np.exp(-0.5 * x**2)
def softmaxFuc(x):
softmax = np.exp(x) / np.sum(np.exp(x))
# print(softmax)
# print(np.sum(softmax))
return softmax
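# note: softmaxFuc overflows for large positive x; the numerically stable
# form subtracts the max first:
#     np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))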
def heartFuc(x, r=1, up=True): # heart equation: x**2+ (5*y/4 - sqrt(abs(x)))**2 = r**2
if up:
a = np.sqrt(r**2 - x**2) * 1 + np.sqrt(abs(x))
else:
a = np.sqrt(r**2 - x**2) * (-1) + np.sqrt(abs(x))
return a * 4 / 5
def circleFuc(x, r=1, up=True): # circle equation: x**2+ y**2 = r**2
if up:
a = np.sqrt(r**2 - x**2) * 1
else:
a = np.sqrt(r**2 - x**2) * (-1)
return a
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def getCirclePoints(r=1, N=10, func=heartFuc):
x = np.linspace(-r, r, N)
y = func(x, r=r) # Up part points of curve, set sqrt value positive
xDown = np.flip(x) # Down part points of curve, set sqrt value negative
yDown = func(xDown, r=r, up=False)
# connect from start
x = np.concatenate((x, xDown), axis=0)
y = np.concatenate((y, yDown), axis=0)
if 0: # connect from random
rand = np.random.randint(1, len(x), size=1)[0]
x = np.concatenate((x[rand:], x[:rand]), axis=0)
y = np.concatenate((y[rand:], y[:rand]), axis=0)
# print('x=',x)
# print('y=',y)
return x, y
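# Example: sample a closed heart-shaped outline (upper branch, then the
# flipped lower branch) ready to be drawn as one continuous path:
#
#     x, y = getCirclePoints(r=10, N=50, func=heartFuc)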
def getRectanglePoints(x0=0, y0=0, N=10, w=10, h=10):
x1 = np.linspace(x0, x0 + w, N)
y1 = np.zeros_like(x1) + y0
y2 = np.linspace(y0, y0 + h, N)
x2 = np.zeros_like(y2) + x0 + w
x3 = np.flip(x1)
y3 = np.zeros_like(x3) + y0 + h
y4 = np.flip(y2)
x4 = np.zeros_like(y4) + x0
# connect from start
x = np.concatenate((x1, x2), axis=0)
x = np.concatenate((x, x3), axis=0)
x = np.concatenate((x, x4), axis=0)
y = np.concatenate((y1, y2), axis=0)
y = np.concatenate((y, y3), axis=0)
y = np.concatenate((y, y4), axis=0)
center = ((x0 + w) / 2, (y0 + h) / 2)
return x, y, center
def getRandomProper3Points(min=0, max=5):
"""get random point from 0,1,2,3 quadrants,
pt(x,y) = (min ~ max)
"""
c = list(combinations(range(4), 3))
# [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
# print(c)
qds = random.choice(c)
# print('qds=',qds)
center = (max - min) / 2.0
pts = None
for qd in qds:
if qd == 0:
x = np.random.random() * (center - min) + min
y = np.random.random() * (center - min) + min
elif qd == 1:
x = np.random.random() * (max - center) + center
y = np.random.random() * (center - min) + min
elif qd == 2:
x = np.random.random() * (center - min) + min
y = np.random.random() * (max - center) + center
elif qd == 3:
x = np.random.random() * (max - center) + center
y = np.random.random() * (max - center) + center
pt = np.array([[x, y]])
pts = np.concatenate((pts, pt), axis=0) if pts is not None else pt
return pts
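# Example (sketch): pts = getRandomProper3Points(0, 5) returns an array of
# shape (3, 2): one random point in each of three distinct quadrants of the
# [0, 5] x [0, 5] square, so the three points form a non-degenerate triangle.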
def drawFuncSVG(svg, offsetX=0, offsetY=0, color=None):
N = 500
x = np.linspace(-100, 100, N)
fOffsetX = 50
fOffsetY = 100
ptX = x + offsetX + offsetX
ptY = funcIdentity(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
fOffsetX = 50
fOffsetY = 50
ptX = x + offsetX + fOffsetX
ptY = funcQuadratic(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
fOffsetX = 50
fOffsetY = 50
ptX = x + offsetX + fOffsetX
ptY = funcSin(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
ptX = x + offsetX + fOffsetX
ptY = funcCos(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
ptX = x + offsetX + fOffsetX
ptY = normalDistribution(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
ptX = x + offsetX + fOffsetX
ptY = softmaxFuc(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
ptX = x + offsetX + fOffsetX
ptY = sigmoid(x) * -1 + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
ptX, ptY = getCirclePoints(r=10, N=10, func=circleFuc)
ptX = ptX + offsetX + fOffsetX
ptY = ptY + offsetY + fOffsetY
drawOneFuncSVG(svg, ptX, ptY, N=N, color=color)
def drawOneFuncSVG(svg, ptX, ptY, N=10, color=None):
x = ptX[0]
y = ptY[0]
path = 'M %.1f %.1f L ' % (x, y)
for x, y in zip(ptX, ptY):
path = path + ' ' + str(clip_float(x)) + ' ' + str(clip_float(y))
svg.draw(draw_path(path, stroke_width=0.5, color=color or random_color()))
def reflect_xy(ptX, ptY):
"""Cartesian(math) coordinate to svg coordiantes
Args:
ptX (array): (N, )
ptY (array): (N, )
Returns:
tuple: (ptX', ptY')
"""
ptX = ptX.reshape((ptX.shape[0], 1))
ptY = ptY.reshape((ptY.shape[0], 1))
pts = np.hstack(([ptX, ptY]))
# print('pts=', pts, pts.shape)
return reflection_points(pts, False)
def drawFuncSVG2(svg):
W, H = svg.get_size()
cx, cy = W // 2, H // 2
N = 100
ptX, ptY = getCirclePoints(r=45, N=N, func=heartFuc)
ptX, ptY = reflect_xy(ptX, ptY)
ptX, ptY = translation_pts_xy(ptX, ptY, (cx, cy))
drawOneFuncSVG(svg, ptX, ptY, N=N, color=random_color_hsv())
ptX = np.linspace(-50, 50, num=200)
ptY = funcQuadratic(ptX)
ptX, ptY = reflect_xy(ptX, ptY)
ptX, ptY = translation_pts_xy(ptX, ptY, (cx, cy+40))
drawOneFuncSVG(svg, ptX, ptY, N=N, color=random_color_hsv())
ptX = np.linspace(-50, 50, num=200)
ptY = funcSin(ptX)*20
ptX, ptY = reflect_xy(ptX, ptY)
ptX, ptY = translation_pts_xy(ptX, ptY, (cx, cy))
drawOneFuncSVG(svg, ptX, ptY, N=N, color=random_color_hsv())
ptX = np.linspace(-50, 50, num=200)
"""
From <NAME>'s odysseus project.
"""
"""Functions that approach several polylogarithms by polynomials.
Precision is on the order of 1e-7 or better. For working with fermions, the
polylog functions Li(x) are usually used in the form -Li(-exp(x)). We therefore
define functions fermi_poly as:
fermi_poly_s(x) :math:`=-Li_s(-e^x)`,
with :math:`Li_s(z)=\sum_{k=1}^{\infty}\frac{z^k}{k^s}`.
This is useful if you are only dealing with Fermi statistics. For working with
bose statistics we define g-functions in a similar way.
There is a more accurate and general algorithm in lerch.py for Li_s(x),
that works for all s>0, the polynomial approximations in this file are much
faster however.
"""
import numpy as np
def fermi_poly3(x):
"""fermi_poly3(x), equal to -Li_3(-e^x)"""
def f0(x):
return np.exp(x)
def f1(x):
ex = np.exp(x)
return (1 + (-0.125 + (0.037037037037037035 + (-0.015625 + (0.008 - 0.004629629629629629*ex)*ex)*ex)*ex)*ex)*ex
def f2(x):
x2 = x**2
return 0.9015426773696955 + (0.8224670334241131 + (0.34657359027997264 + (0.08333333333333333 + (0.010416666666666666 +(-0.00017361111111111112 + (6.200396825396825e-6 +(-2.927965167548501e-7 + (1.6179486665597777e-8 + (-9.90785651003905e-10 + (6.525181428041877e-11 +(-4.5372283133067906e-12 + 3.290608283068484e-13*x2)*x2)*x2)*x2)*x2)*x2)*x2)*x2)*x)*x)*x)*x
def f3(x):
invex = np.exp(-x)
return (((((0.008*invex - 0.015625)*invex + 0.037037037037037035)*invex) - 0.125)*invex + 1)*invex + 1.6449340668482262*x + 0.16666666666666666*x**3
def f4(x):
return 1.6449340668482262*x + 0.16666666666666666*x**3
# fix for bug in piecewise, fixed in more recent numpy
if np.isscalar(x):
x = np.array([x], dtype=float)
# define piecewise function and evaluate
ans = np.piecewise(x, [x<=-20, np.logical_and(x>-20, x<=-2), \
np.logical_and(x>-2, x<=2), np.logical_and(x>2, x<=20)],\
[f0, f1, f2, f3, f4])
return ans
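# Sanity check (sketch): -Li_3(-1) = eta(3) = (3/4)*zeta(3) ~= 0.9015426774,
# so fermi_poly3(0.0) should return ~0.9015426773696955 (the f2 constant term).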
def fermi_poly5half(x):
"""fermi_poly5half(x), equal to -Li_{5/2}(-e^x)
FAILS TESTS (COMPARING TO LERCH), DO NOT USE WITHOUT INVESTIGATING MORE
"""
def f0(x):
return np.exp(x)
def f1(x):
ex = np.exp(x)
return (1 + (-0.17677669529663687 + (0.06415002990995843 - (0.03125 + (0.01788854381999832 - (0.011340230290662863 + (0.007713560673657698 - (0.005524271728019902 + (0.00411522633744856 - 0.0031622776601683794*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex
def f2(x):
res = (7.999472242952045e-8 + (2.015789875039643e-8 + (-5.182488893752819e-9 + (-1.3550552937770878e-9 + (3.5944104666022113e-10 + (9.653703483078106e-11 + (-2.6209625544677692e-11 + (-7.185930974961928e-12 + (1.9812061650792594e-12 + 5.447084984800099e-13*x)*x)*x)*x)*x)*x)*x)*x)*x)*x
return 0.8671998890121841+(0.7651470246254081+(0.30244932171081546+(0.06335080210161399+(0.0049450362799933825+(-0.0007320093393446121+(-0.00013339945006254949 + (0.000027147085179903566+(5.930588304137955e-6+(-1.3626304577484817e-6 + (-3.252451788607287e-7 + res*x)*x)*x)*x)*x)*x)*x)*x)*x)*x)*x
def f3(x):
res = 5.992860912139351e-7 + (-6.083668666935579e-8 + (5.041252634789406e-9 + (-3.386896134140133e-10 + (1.8196669171414837e-11 + (-7.642990316874879e-13 + (2.4202106712129105e-14 + (-5.437364923509245e-16 + (7.72925401611516e-18 -5.228771407811986e-20*x)*x)*x)*x)*x)*x)*x)*x)*x
return 0.869416215427492 + (0.7603408345815055 + (0.30606614629176887 + (0.06361411550944529 + (0.002145410757189772 + (0.002020072416997651 + (-0.0017045762862650151 + (0.0006382881546811445 + (- 0.00016246851298525836 + (0.00003140383144730955 + (-4.819813947314412e-6+res*x)*x)*x)*x)*x)*x)*x)*x)*x)*x)*x
def f4(x):
x2 = x**2
invex = np.sqrt(x)
return (-2.0851412241155116/x/x - 0.5343060576801043)/x/invex + 1.8561093322772355*invex + 0.30090111122547003*x2*invex
def f5(x):
x2 = x**2
invex = np.sqrt(x)
return 1.8561093322772355*invex + 0.30090111122547003*x2*invex
# fix for bug in piecewise, fixed in more recent numpy
if np.isscalar(x):
x = np.array([x], dtype=float)
# define piecewise function and evaluate
ans = np.piecewise(x, [x<=-20, np.logical_and(x>-20, x<=-2), \
np.logical_and(x>-2, x<=2), np.logical_and(x>2, x<=12), \
np.logical_and(x>12, x<=20)], [f0, f1, f2, f3, f4, f5])
return ans
def fermi_poly2(x):
"""fermi_poly2(x), equal to -Li_2(-e^x)"""
def f0(x):
return np.exp(x)
def f1(x):
ex = np.exp(x)
return (1.+( -0.25+( 0.111111+( -0.0625+( 0.04+( -0.0277778+( 0.0204082+( -0.015625+( 0.0123457+( -0.01+( 0.00826446+( -0.00694444+( 0.00591716+( -0.00510204+( 0.00444444+( -0.00390625+( 0.00346021+( -0.00308642+( 0.00277008+ -0.0025*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex
def f2(x):
ex = x**2
return 0.822467+(0.6931471805599453+( 0.25+( 0.04166666666666666+( -0.0010416666666666534+( 0.00004960317460316857+( -2.927965167558005e-6+(1.9415383998507108e-7+( -1.3870999148454729e-8+(1.0440288911003276e-9+(-8.167040926799743e-11+6.5806618711692295e-12*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*x)*x)*x
def f3(x):
ex = np.exp(-x)
return 1.6449340668482262 + 0.5*x**2 - (1.+( -0.25+( 0.111111+( -0.0625+( 0.04+( -0.0277778+( 0.0204082+( -0.015625+( 0.0123457+( -0.01+( 0.00826446+( -0.00694444+( 0.00591716+( -0.00510204+( 0.00444444+( -0.00390625+( 0.00346021+( -0.00308642+( 0.00277008 -0.0025*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex)*ex
def f4(x):
return 1.6449340668482262 + 0.5*x**2
# fix for bug in piecewise, fixed in more recent numpy
if np.isscalar(x):
x = np.array([x], dtype=float)
# define piecewise function and evaluate
ans = np.piecewise(x, [x<=-20, np.logical_and(x>-20, x<=-1), \
np.logical_and(x>-1, x<=1), np.logical_and(x>1, x<=20)],\
[f0, f1, f2, f3, f4])
return ans
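# Sanity check (sketch): -Li_2(-1) = eta(2) = pi**2/12 ~= 0.8224670334,
# so fermi_poly2(0.0) should return ~0.822467 (the f2 constant term).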
def dilog(z):
"""Dilog(x), equal to Li_2(x)
d = dilog(z) = Li_2(z)
= -Int From t=0 To t=z log(1-t) dt/t for all z.
= Sum From n=1 To n=Inf z**n/n**2 for |z|<=1.
INPUT z: real or complex, scalar, vector or matrix.
OUTPUT d: component-wise dilogarithm of z.
References:
[1] <NAME>. 1958. Dilogarithms and associated functions. Macdonald.
[2] <NAME>. 1992. Technical Report 15-92. University of Kent computing laboratory.
[3] http://en.wikipedia.org/wiki/Polylog
<NAME>, February 28th, 2006.
"""
if isinstance(z, float) or isinstance(z, int):
z = np.array([z])
#!/usr/bin/env python
# coding=utf-8
import io
import os
import logging
import tempfile
import unittest
import simplejson as json
from copy import deepcopy
from pathlib import Path
import numpy as np
import numpy.testing as npt
from ruamel.yaml import YAML
from ioos_qc.config import QcConfig
from ioos_qc.qartod import ClimatologyConfig
L = logging.getLogger('ioos_qc')
L.setLevel(logging.INFO)
L.handlers = [logging.StreamHandler()]
yaml = YAML(typ='safe')
class ConfigLoadTest(unittest.TestCase):
def setUp(self):
template = """
qartod:
gross_range_test:
suspect_span: [1, 11]
fail_span:
- 0
- 12
goober:
foo: [1, null]
"""
self.handle, self.yamlfile = tempfile.mkstemp(suffix='.yaml')
with open(self.yamlfile, 'w') as f:
f.write(template)
self.expected_dict = {
'qartod': {
'gross_range_test': {
'suspect_span': [1, 11],
'fail_span': [0, 12],
},
'goober': {
'foo': [1, None]
}
}
}
def tearDown(self):
os.close(self.handle)
os.remove(self.yamlfile)
def test_load_yaml_dict_object(self):
with open(self.yamlfile) as f:
y = yaml.load(f.read())
qc = QcConfig(y)
assert qc.config == self.expected_dict
def test_load_yaml_str(self):
with open(self.yamlfile) as f:
qc = QcConfig(f.read())
assert qc.config == self.expected_dict
def test_load_json_str(self):
with open(self.yamlfile) as f:
js = json.dumps(yaml.load(f.read()))
qc = QcConfig(js)
assert qc.config == self.expected_dict
def test_load_yaml_file_path(self):
qc = QcConfig(self.yamlfile)
assert qc.config == self.expected_dict
def test_load_yaml_path_object(self):
qc = QcConfig(Path(self.yamlfile))
assert qc.config == self.expected_dict
def test_load_json_stringio(self):
st = io.StringIO()
qc = QcConfig(self.yamlfile)
with open(self.yamlfile, 'rt') as f:
js = json.dumps(yaml.load(f.read()))
st.write(js)
qc = QcConfig(st)
st.close()
assert qc.config == self.expected_dict
def test_load_yaml_stringio(self):
st = io.StringIO()
with open(self.yamlfile, 'rt') as f:
st.write(f.read())
qc = QcConfig(st)
st.close()
assert qc.config == self.expected_dict
class ConfigRunTest(unittest.TestCase):
def setUp(self):
self.config = {
'qartod': {
'gross_range_test': {
'suspect_span': [1, 11],
'fail_span': [0, 12],
}
}
}
def test_run(self):
qc = QcConfig(self.config)
r = qc.run(
inp=list(range(13))
)
expected = np.array([3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3])
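# QARTOD flag convention: 1 = GOOD, 3 = SUSPECT, 4 = FAIL. Inputs 0 and 12
# fall outside suspect_span [1, 11] but inside fail_span [0, 12], hence the
# 3s at both ends.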
npt.assert_array_equal(
r['qartod']['gross_range_test'],
expected
)
assert 'aggregate' not in r['qartod']
def test_run_with_agg(self):
qc = QcConfig({'qartod': {
'gross_range_test': {
'fail_span': [0, 12],
},
'spike_test': {
'suspect_threshold': 3,
'fail_threshold': 10,
}
}})
inp = [-1, 0, 1, 2, 10, 3]
expected_gross_range = np.array([4, 1, 1, 1, 1, 1])
import os
import glob
import pickle
import pcl
import torch
import torch.utils.data
import torch.nn as nn
import numpy as np
# global configurations:
from autolab_core import YamlConfig
from dexnet.grasping import GpgGraspSampler
from dexnet.grasping import RobotGripper
home_dir = os.environ['HOME']
yaml_config = YamlConfig(home_dir + "/Projects/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/Projects/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSampler(gripper, yaml_config)
class PointGraspDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
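# Output shape note: project_chann == 3 returns the normal map of shape
# (project_size, project_size, 3); project_chann == 12 stacks occupancy and
# normal maps for three axis orders into (project_size, project_size, 12).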
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspMultiClassDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 2
else:
label = 1
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspOneViewDataset(torch.utils.data.Dataset):
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 150  # minimum number of points required
# projection related parameters
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
# transform matrix from the Google scanner mesh to the point cloud
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy')) # grasp pose file
# only use point clouds captured by camera NP3
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy')) # point cloud file
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:  # build the point cloud file list
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
for i in fl_grasp:  # build the list of generated grasp poses
grasp_fl_name = i.split('/')[-1].split('.')[0]  # grasp file name
cnt = grasp_fl_name.split('_')[-1]  # grasp file name suffix
head = grasp_fl_name.split('_')[0]  # grasp file name prefix
k = grasp_fl_name[len(head)+1:-(len(cnt)+1)]  # canonical object name
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys()) # objects to deal with
# print("object1", object1)
object2 = set(self.transform.keys()) # all ycb objects name
# print("object2", object2)
self.object = list(object1)
# self.object = list(object1.intersection(object2))  # take the intersection
print("objects to deal with", self.object)
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
"""
Collect the point cloud inside the gripper closing region.
:param grasp: grasp pose in the scanner mesh frame (grasp_center, grasp_axis, grasp_angle, grasp_width, jaw_width)
:param pc: point cloud
:param transform: transform from the scanner mesh frame to the point cloud frame
:param vis: visualization flag
:return: point cloud inside the gripper closing region, or its projection
"""
# axis-angle representation
center = grasp[0:3]  # grasp center
axis = grasp[3:6]  # binormal
width = grasp[6]  # grasp width
angle = grasp[7]  # rotation angle
axis = axis/np.linalg.norm(axis) # (3,)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t], [0, 1, 0], [-sin_t, 0, cos_t]]  # rotation matrix
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
# unit direction vectors of the axes
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]  # rotation matrix
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)  # gripper approach direction
minor_normal = -np.cross(axis, approach)  # minor curvature direction; NOTE: negated to keep a right-handed frame
# collision check
# grasp_bottom_center = -ags.gripper.hand_depth * approach + center
# hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
# local_hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
# if_collide = ags.check_collide(grasp_bottom_center, approach,
# binormal, minor_normal, graspable, local_hand_points)
vis = False
if vis:  # NOTE: the grasp pose obtained here may still collide with the point cloud (minor impact)!!! TODO: collision check
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
# --- in the scanner frame ---:
# world frame axes
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
show_points(pc, color='b', scale_factor=.002)  # raw point cloud
show_points(center, color='r', scale_factor=.008)
# draw the gripper frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.4, 0.6, 0.0))
mlab.title("google", size=0.3, color=(0, 0, 0))
mlab.show()
left = center - width*axis/2  # leftmost point of the gripper
right = center + width*axis/2  # rightmost point of the gripper
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
# NOTE: m:mesh c:center p:point cloud
matrix_m2c = np.array([approach, binormal, minor_normal])  # rotation: scanner mesh frame -> grasp-center frame
matrix_p2m = transform[:3, :3]  # rotation: point cloud frame -> scanner mesh frame
trans_p2m = transform[:, 3:][:3].reshape(3,)  # translation: point cloud frame -> scanner mesh frame
trans_p2m = np.array([trans_p2m[0], trans_p2m[1], trans_p2m[2] + 0.02])  # small manual offset
pc_p2m = np.dot(matrix_p2m.T, (pc - trans_p2m).T).T  # point cloud registered into the scanner mesh frame
pc_m2c = (np.dot(matrix_m2c, (pc_p2m-center).T)).T  # scanner-frame cloud transformed into the grasp-center frame
# pc_c2m = (np.dot(matrix_m2c.T, pc_m2c.T)).T + center  # grasp-center-frame cloud mapped back to the scanner frame
left_t = (-width * np.array([0, 1, 0]) / 2).squeeze()
right_t = (width * np.array([0, 1, 0]) / 2).squeeze()
# select the points inside the gripper closing region
x_limit = ags.gripper.hand_depth
z_limit = ags.gripper.hand_height
y_limit = width
x1 = pc_m2c[:, 0] > -x_limit
x2 = pc_m2c[:, 0] < 0
y1 = pc_m2c[:, 1] > -y_limit/2
y2 = pc_m2c[:, 1] < y_limit/2
z1 = pc_m2c[:, 2] > -z_limit/2
z2 = pc_m2c[:, 2] < z_limit/2
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]  # indices of points inside the closing region
if len(self.in_ind) < self.min_point_limit:  # too few points inside the closing region
# print("\033[0;32m%s\033[0m" % "[INFO] points num", len(self.in_ind))
return None
vis = False
if vis:  # visualize the point cloud inside the closing region
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
# world frame axes
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
# show_points(pc, color='b', scale_factor=.002)  # raw point cloud
show_points(pc_p2m, color='g', scale_factor=.002)  # point cloud registered into the scanner frame
show_points(pc_m2c, color='b', scale_factor=.002)  # point cloud in the grasp-center frame
# show_points(pc_c2m, color='r', scale_factor=.002)  # grasp-center-frame cloud mapped back to the scanner frame
# draw the gripper in the scanner frame
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.0, 1.0, 0.0))
# the gripper in the grasp-center frame (should sit at the world origin)
hand_points = (np.dot(matrix_m2c, (hand_points - center).T)).T  # gripper key points transformed into the grasp-center frame
ags.show_grasp_3d(hand_points, color=(0.5, 0.5, 0.5))  # draw the gripper
# the grasp frame shown in the scanner frame
show_points(center, color='r', scale_factor=.008)  # grasp center in the scanner frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
show_points(pc_m2c, color='c', scale_factor=.002)  # point cloud in the grasp-center frame
show_points(pc_m2c[self.in_ind], color='b', scale_factor=.002)  # closing-region points in the grasp-center frame
pc_c2m_region = (np.dot(matrix_m2c.T, pc_m2c[self.in_ind].T)).T + center  # closing-region points mapped back to the scanner frame
show_points(pc_c2m_region, color='r', scale_factor=.002)
# draw the gripper closing region
# x = (np.array([[-1, 1, 1, -1, -1], [-1, 1, 1, -1, -1]]) - 1) * x_limit/2
# y = np.array([[-1, -1, -1, -1, -1], [1, 1, 1, 1, 1]]) * y_limit
# z = np.array([[1, 1, -1, -1, 1], [1, 1, -1, -1, 1]]) * z_limit
# mlab.mesh(x, y, z, color=(1, 0, 0), opacity=0.4)
# eight vertices of a unit cube
x_arr = np.array([-1, 1, 1, -1, -1, 1, 1, -1])/2
y_arr = np.array([-1, -1, 1, 1, -1, -1, 1, 1])/2
z_arr = np.array([-1, -1, -1, -1, 1, 1, 1, 1])/2
x = (x_arr - 0.5) * ags.gripper.hand_depth  # shift by half a unit
y = y_arr * (ags.gripper.hand_outer_diameter-2*ags.gripper.finger_width)
z = z_arr * ags.gripper.hand_height
triangles = [(0, 1, 2), (0, 2, 3), (4, 5, 6), (4, 6, 7), (1, 5, 6), (1, 2, 6),
(0, 4, 7), (0, 3, 7), (2, 3, 6), (3, 6, 7), (0, 1, 5), (0, 4, 5)]
mlab.triangular_mesh(x, y, z, triangles, color=(1, 0, 1), opacity=0.2)
mlab.title("cloud", size=0.3, color=(0, 0, 0))
mlab.show()
if self.projection:
return self.project_pc(pc_m2c, width)  # return the projected point cloud
else:
return pc_m2c[self.in_ind]  # return the point cloud inside the closing region
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
"""
Compute the projection of the point cloud.
:param point_cloud_voxel:
:param m_width_of_pic:
:param margin:
:param surface_normal:
:param order:
:param gripper_width:
:return:
"""
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
Project the point cloud inside the gripper closing region.
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check) != 0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the projection
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the projection
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the projection
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# get object and grasp pose indices
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]  # object name, used to fetch grasp poses
obj_pc = self.transform[obj_grasp][0]  # object name, used to fetch point clouds
f_grasp = self.d_grasp[obj_grasp]  # grasp pose file name
fl_pc = np.array(self.d_pc[obj_pc])  # point cloud file names of all views
np.random.shuffle(fl_pc)  # shuffle the files
grasp = np.load(f_grasp)[grasp_ind]  # load the grasp pose
pc = np.load(fl_pc[-1])  # pick a point cloud (random after the shuffle)
t = self.transform[obj_grasp][1]  # transform from the scanner mesh to the point cloud; grasp poses were generated on the scanner mesh and must be transformed into the point cloud frame
# debug
# level_score_, refine_score_ = grasp[-2:]
# score_ = level_score_ + refine_score_ * 0.01
# if score_ >= self.thresh_bad:
# print("label: 0")
# elif score_ <= self.thresh_good:
# print("label: 1")
grasp_pc = self.collect_pc(grasp, pc, t)  # point cloud inside the gripper closing region
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
# sample with replacement if there are too few points, subsample without replacement if too many
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))  # reorder channels
# classify by score
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
# print("grasp_pc", grasp_pc, grasp_pc.shape, label) # (3, 750)
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspOneViewMultiClassDataset(torch.utils.data.Dataset):
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T  # all point in grid
import numpy as np
import pandas as pd
from scipy.stats import norm, percentileofscore
from tqdm.notebook import tqdm
def rv_cc_estimator(sample,n=22):
"""
Realized volatility close to close calculation. Returns a time series of the realized volatility.
sample: series or dataframe of closing prices indexed by date
n: sample size period for the volatility
"""
sample_clean = sample.dropna()
returns = np.divide(sample_clean, sample_clean.shift(1))
log_returns = np.log(returns)
ann_log_returns = 252*np.power(log_returns,2)/n
return 100 * np.sqrt(ann_log_returns.rolling(window=n,min_periods=n).sum())
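# Example usage (sketch; `closes` is a hypothetical pd.Series of daily closing
# prices indexed by date):
#   rv22 = rv_cc_estimator(closes, n=22)  # rolling 22-day realized vol, in vol points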
def cc_estimator(sample,n=22,days=1):
combined_rv = pd.Series()
sample_clean = sample.dropna()
for i in range(days):
staggered_samples = sample_clean[i::days]
returns = np.divide(staggered_samples, staggered_samples.shift(1))
log_returns = np.log(returns)
ann_log_returns = 252*np.power(log_returns,2)/n/days
sample_rv = 100 * np.sqrt(ann_log_returns.rolling(window=n,min_periods=n).sum())
combined_rv = pd.concat([combined_rv, sample_rv])
return combined_rv.sort_index()
def calc_period_var(sample, return_period=22, lookback=66):
"""
A period return's move normalized. Calculated as the squared move (variance) scaled by the period
sample: series or dataframe of closing prices indexed by date
"""
sample_clean = sample.dropna()
lookback_ret = sample_clean.pct_change(periods=return_period)
return (lookback_ret**2).rolling(window=lookback).mean() * 250 / return_period
def calc_var_ratio(sample, return_period=22, period_min=3, day_min=66):
"""
The variance ratio based on the normalized historical returns over a given rolling return period ratioed to the daily historical returns
sample: series or dataframe of closing prices indexed by date
return period:
"""
lookback = max(return_period * period_min, day_min)
period_var = calc_period_var(sample, return_period=return_period, lookback=lookback)
daily_var = calc_period_var(sample, return_period=1, lookback=lookback)
return period_var / daily_var
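# Interpretation note: a variance ratio above 1 points to trending behavior
# (positively autocorrelated returns); a ratio below 1 points to mean reversion.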
def calc_lfev(sample, return_period=22, period_min=3, day_min=66):
lookback = max(return_period * period_min, day_min)
period_var = calc_period_var(sample, return_period=return_period, lookback=lookback)
daily_var = calc_period_var(sample, return_period=1, lookback=lookback)
return (np.sqrt(period_var) - np.sqrt(daily_var)) * 100
def move_volatility(prices, days=66):
abs_move = (prices / prices.shift(days) - 1)
high_low = (prices.rolling(days+1).max() - prices.rolling(days+1).min()) / prices.shift(days)
return abs_move / high_low * np.abs(abs_move) * 100
def move_volatility_range(prices, days=66):
abs_move = (prices / prices.shift(days) - 1)
high_prices = prices.rolling(days+1).max()
low_prices = prices.rolling(days+1).min()
close_dist_high_low = ((high_prices - prices.shift(days)) + (low_prices - prices.shift(days))) / prices.shift(days)
high_low = (high_prices - low_prices) / prices.shift(days)
return close_dist_high_low * (0.5 * (np.abs(abs_move) + high_low)) / high_low * 100
def generate_returns_dict(prices, undl_list, return_days):
returns = {}
for u in undl_list:
returns[u] = pd.DataFrame()
for i in return_days:
close_prices = prices[u, 'Close'].dropna()
returns[u][i] = (close_prices / close_prices.shift(i) - 1) * 100
return returns
def rolling_trend(prices, undl_list, return_days, smoothing=5):
'''Determines the trend by blending the returns across different periods and smooths the results.'''
avg_returns_dict = {}
returns_summary = {}
returns_dict = generate_returns_dict(prices, undl_list, return_days)
for u in undl_list:
avg_returns_dict[u] = pd.DataFrame()
for i in return_days:
avg_returns_dict[u]['{}D Trend'.format(i)] = returns_dict[u][i].dropna().rolling(smoothing).mean() / np.sqrt(i)
avg_returns_dict[u]['Average Trend'] = avg_returns_dict[u].dropna().mean(axis=1)
if len(avg_returns_dict[u].dropna()) > 0:
returns_summary[u] = avg_returns_dict[u]['Average Trend'].dropna()[-1]
returns_summary = pd.Series(returns_summary)
return returns_summary, avg_returns_dict
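# Dividing each i-day return by sqrt(i) puts the horizons on a comparable
# footing (returns scale roughly with sqrt(time) under a random walk), so the
# averaged trend is not dominated by the longest return periods.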
def spot_stats(sample, n=260):
"""
Simple spot statistics returning the distance in % terms from the last spot to the max spot in the period, distance to min spot, and current percentile in min to max.
sample: series or dataframe of closing prices
n: historical lookback period.
"""
spot_window = sample.dropna()[-n:]
percentile = percentileofscore(spot_window, spot_window[-1])
high = spot_window.max()
low = spot_window.min()
max_pct = (high / spot_window[-1] - 1) * 100
min_pct = (low / spot_window[-1] - 1) * 100
return max_pct, min_pct, percentile
def past_spot_ranges(sample, n=22, haircut=0.2, intraday=True):
'''Finds Returns the past n spot range based on max/min of the period'''
if intraday:
sample_max = (sample['High'].rolling(n).max() / sample['Close'].shift(n) - 1) * 100
sample_min = (sample['Low'].rolling(n).min() / sample['Close'].shift(n) - 1) * 100
else:
sample_max = (sample['Close'].rolling(n).max() / sample['Close'].shift(n) - 1) * 100
sample_min = (sample['Close'].rolling(n).min() / sample['Close'].shift(n) - 1) * 100
delta_scale = 1 - haircut # Set a more conservative estimate of the range.
return pd.concat([abs(sample_max) * delta_scale, abs(sample_min) * delta_scale], axis=1).max(axis=1)
def past_abs_returns(sample, n=5):
return np.abs((1 - sample['Close'].shift(n) / sample['Close']) * 100)
def varvolbreakeven(var, vol):
    """
    Realized vols at which a variance swap (strike `var`, quoted in vol points)
    and a volatility swap (strike `vol`) pay out the same, i.e. the roots of
    0.5 * (r**2 / var - var) = r - vol, solved with the quadratic formula.
    """
    b = -1
    a = 1 / (2 * var)
    c = vol - var / 2
breakeven1 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
breakeven2 = (-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)
return breakeven1, breakeven2
def zscore_calc(hist_data, live_data):
return (live_data - hist_data.mean()) / hist_data.std()
def var_payout(realized,strike):
return 0.5 * (realized**2 / strike - strike)
def vol_payout(realized, strike):
return realized - strike
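# Illustrative check (made-up strikes, quoted in vol points): at the levels
# returned by varvolbreakeven, a variance swap struck at 21 and a volatility
# swap struck at 20 pay out the same.
#   b1, b2 = varvolbreakeven(var=21.0, vol=20.0)   # ~27.5 and ~14.5
#   assert np.isclose(var_payout(b1, 21.0), vol_payout(b1, 20.0))
#   assert np.isclose(var_payout(b2, 21.0), vol_payout(b2, 20.0))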
class BlackScholes:
def __init__(self, s, k, r, q, vol, t, payoff):
"""vol is expressed in %. eg enter 16v as 16, not 0.16"""
self.s = s
self.k = k
self.r = r
self.q = q
self.vol = vol / 100
self.t = t
self.payoff = payoff
def d1(self):
        return (np.log(self.s / self.k) + (self.r - self.q + self.vol**2 / 2) * self.t) / (self.vol * np.sqrt(self.t))
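    # Illustrative usage (hypothetical numbers): d1 for a 3-month ATM option
    # quoted at 16 vol is roughly 0.10.
    #   bs = BlackScholes(s=100, k=100, r=0.02, q=0.0, vol=16, t=0.25, payoff='call')
    #   bs.d1()  # ~0.1025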
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from .layers.contrastive import ContrastiveLoss
from .layers.utils import l1norm, l2norm
from .layers.img_enc import EncoderImage
from .layers.txt_enc import EncoderText
class VisualSA(nn.Layer):
"""
Build global image representations by self-attention.
Args: - local: local region embeddings, shape: (batch_size, 36, 1024)
- raw_global: raw image by averaging regions, shape: (batch_size, 1024)
Returns: - new_global: final image by self-attention, shape: (batch_size, 1024).
"""
def __init__(self, embed_dim, dropout_rate, num_region):
super(VisualSA, self).__init__()
self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(num_region),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.BatchNorm1D(embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
self.init_weights()
def init_weights(self):
for embeddings in self.children():
for m in embeddings:
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
def forward(self, local, raw_global):
# compute embedding of local regions and raw global image
l_emb = self.embedding_local(local)
g_emb = self.embedding_global(raw_global)
# compute the normalized weights, shape: (batch_size, 36)
g_emb = paddle.concat([g_emb.unsqueeze(1) for _ in range(l_emb.shape[1])], axis=1)
common = paddle.multiply(l_emb, g_emb)
weights = self.embedding_common(common).squeeze(2)
weights = F.softmax(weights, axis=1)
# compute final image, shape: (batch_size, 1024)
new_global = (weights.unsqueeze(2) * local).sum(axis=1)
new_global = l2norm(new_global, dim=-1)
return new_global
class TextSA(nn.Layer):
"""
Build global text representations by self-attention.
Args: - local: local word embeddings, shape: (batch_size, L, 1024)
- raw_global: raw text by averaging words, shape: (batch_size, 1024)
Returns: - new_global: final text by self-attention, shape: (batch_size, 1024).
"""
def __init__(self, embed_dim, dropout_rate):
super(TextSA, self).__init__()
self.embedding_local = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_global = nn.Sequential(nn.Linear(embed_dim, embed_dim),
nn.Tanh(), nn.Dropout(dropout_rate))
self.embedding_common = nn.Sequential(nn.Linear(embed_dim, 1))
self.init_weights()
def init_weights(self):
for embeddings in self.children():
for m in embeddings:
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
def forward(self, local, raw_global):
# compute embedding of local words and raw global text
l_emb = self.embedding_local(local)
g_emb = self.embedding_global(raw_global)
# compute the normalized weights, shape: (batch_size, L)
g_emb = paddle.concat([g_emb.unsqueeze(1) for _ in range(l_emb.shape[1])], axis=1)
common = paddle.multiply(l_emb, g_emb)
weights = self.embedding_common(common).squeeze(2)
weights = F.softmax(weights, axis=1)
# compute final text, shape: (batch_size, 1024)
new_global = (weights.unsqueeze(2) * local).sum(axis=1)
new_global = l2norm(new_global, dim=-1)
return new_global
class GraphReasoning(nn.Layer):
"""
Perform the similarity graph reasoning with a full-connected graph
Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)
Returns; - sim_sgr: reasoned graph nodes after several steps, shape: (batch_size, L+1, 256)
"""
def __init__(self, sim_dim):
super(GraphReasoning, self).__init__()
self.graph_query_w = nn.Linear(sim_dim, sim_dim)
self.graph_key_w = nn.Linear(sim_dim, sim_dim)
self.sim_graph_w = nn.Linear(sim_dim, sim_dim)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(axis=-1)
self.init_weights()
def init_weights(self):
for m in self.children():
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
def forward(self, sim_emb):
sim_query = self.graph_query_w(sim_emb)
sim_key = self.graph_key_w(sim_emb)
sim_edge = self.softmax(paddle.bmm(sim_query, paddle.transpose(sim_key, (0, 2, 1))))
sim_sgr = paddle.bmm(sim_edge, sim_emb)
sim_sgr = self.relu(self.sim_graph_w(sim_sgr))
return sim_sgr
class AttentionFiltration(nn.Layer):
"""
Perform the similarity Attention Filtration with a gate-based attention
Args: - sim_emb: global and local alignments, shape: (batch_size, L+1, 256)
Returns; - sim_saf: aggregated alignment after attention filtration, shape: (batch_size, 256)
"""
def __init__(self, sim_dim):
super(AttentionFiltration, self).__init__()
self.attn_sim_w = nn.Linear(sim_dim, 1)
self.bn = nn.BatchNorm1D(1)
self.sigmoid = nn.Sigmoid()
self.init_weights()
def init_weights(self):
for m in self.children():
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
def forward(self, sim_emb):
sim_attn = self.attn_sim_w(sim_emb)
sim_attn = paddle.transpose(sim_attn, (0, 2, 1))
sim_attn = l1norm(self.sigmoid(self.bn(sim_attn)), dim=-1)
sim_saf = paddle.matmul(sim_attn, sim_emb)
sim_saf = l2norm(sim_saf.squeeze(1), dim=-1)
return sim_saf
class EncoderSimilarity(nn.Layer):
"""
Compute the image-text similarity by SGR, SAF, AVE
Args: - img_emb: local region embeddings, shape: (batch_size, 36, 1024)
- cap_emb: local word embeddings, shape: (batch_size, L, 1024)
Returns:
- sim_all: final image-text similarities, shape: (batch_size, batch_size).
"""
def __init__(self, embed_size, sim_dim, module_name='AVE', sgr_step=3):
super(EncoderSimilarity, self).__init__()
self.module_name = module_name
self.v_global_w = VisualSA(embed_size, 0.4, 36)
self.t_global_w = TextSA(embed_size, 0.4)
self.sim_tranloc_w = nn.Linear(embed_size, sim_dim)
self.sim_tranglo_w = nn.Linear(embed_size, sim_dim)
self.sim_eval_w = nn.Linear(sim_dim, 1)
self.sigmoid = nn.Sigmoid()
if module_name == 'SGR':
self.SGR_module = nn.Sequential()
for i in range(sgr_step):
self.SGR_module.add_sublayer(f'SGR_{i}', GraphReasoning(sim_dim))
elif module_name == 'SAF':
self.SAF_module = AttentionFiltration(sim_dim)
else:
raise ValueError('Invalid input of module_name')
self.init_weights()
def init_weights(self):
for m in self.children():
if isinstance(m, nn.Linear):
r = np.sqrt(6.) / np.sqrt(m.weight.shape[0] + m.weight.shape[1])
v = np.random.uniform(-r, r, size=(m.weight.shape[0], m.weight.shape[1])).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(v)
m.bias.set_value(b)
elif isinstance(m, nn.BatchNorm1D):
a = np.ones(m.weight.shape).astype('float32')
b = np.zeros(m.bias.shape).astype('float32')
m.weight.set_value(a)
m.bias.set_value(b)
def forward(self, img_emb, cap_emb, cap_lens):
sim_all = []
n_image = img_emb.shape[0]
n_caption = cap_emb.shape[0]
# get enhanced global images by self-attention
img_ave = paddle.mean(img_emb, 1)
img_glo = self.v_global_w(img_emb, img_ave)
for i in range(n_caption):
# get the i-th sentence
n_word = cap_lens[i]
cap_i = cap_emb[i, :n_word, :].unsqueeze(0)
cap_i_expand = paddle.concat([cap_i for _ in range(n_image)], axis=0)
# get enhanced global i-th text by self-attention
cap_ave_i = paddle.mean(cap_i, 1)
cap_glo_i = self.t_global_w(cap_i, cap_ave_i)
# local-global alignment construction
Context_img = SCAN_attention(cap_i_expand, img_emb, smooth=9.0)
sim_loc = paddle.pow(paddle.subtract(Context_img, cap_i_expand), 2)
sim_loc = l2norm(self.sim_tranloc_w(sim_loc), dim=-1)
sim_glo = paddle.pow(paddle.subtract(img_glo, cap_glo_i), 2)
sim_glo = l2norm(self.sim_tranglo_w(sim_glo), dim=-1)
# concat the global and local alignments
sim_emb = paddle.concat([sim_glo.unsqueeze(1), sim_loc], 1)
# compute the final similarity vector
if self.module_name == 'SGR':
                # use a separate loop variable so the caption index `i` above is not shadowed
                for step in range(len(self.SGR_module)):
                    sim_emb = self.SGR_module[f'SGR_{step}'](sim_emb)
sim_vec = sim_emb[:, 0, :]
else:
sim_vec = self.SAF_module(sim_emb)
# compute the final similarity score
sim_i = self.sigmoid(self.sim_eval_w(sim_vec))
sim_all.append(sim_i)
# (n_image, n_caption)
sim_all = paddle.concat(sim_all, 1)
return sim_all
def SCAN_attention(query, context, smooth, eps=1e-8):
"""
query: (n_context, queryL, d)
context: (n_context, sourceL, d)
"""
# --> (batch, d, queryL)
queryT = paddle.transpose(query, (0, 2, 1))
# (batch, sourceL, d)(batch, d, queryL)
# --> (batch, sourceL, queryL)
attn = paddle.bmm(context, queryT)
attn = nn.LeakyReLU(0.1)(attn)
attn = l2norm(attn, 2)
# --> (batch, queryL, sourceL)
attn = paddle.transpose(attn, (0, 2, 1))
# --> (batch, queryL, sourceL
attn = F.softmax(attn*smooth, axis=2)
# --> (batch, sourceL, queryL)
attnT = paddle.transpose(attn, (0, 2, 1))
# --> (batch, d, sourceL)
contextT = paddle.transpose(context, (0, 2, 1))
# (batch x d x sourceL)(batch x sourceL x queryL)
# --> (batch, d, queryL)
weightedContext = paddle.bmm(contextT, attnT)
# --> (batch, queryL, d)
weightedContext = paddle.transpose(weightedContext, (0, 2, 1))
weightedContext = l2norm(weightedContext, dim=-1)
return weightedContext
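# Shape sketch for SCAN_attention: with query (B, queryL, d) and context
# (B, sourceL, d), the returned weightedContext is (B, queryL, d) -- one
# attention-pooled context vector per query position.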
class SGRAF(nn.Layer):
"""
Similarity Reasoning and Filtration (SGRAF) Network
"""
def __init__(self,
model_name,
module_name,
sgr_step,
embed_size,
sim_dim,
vocab_size,
word_dim,
num_layers,
image_dim,
margin,
max_violation,
use_bi_gru=True,
image_norm=True,
text_norm=True,
**kwargs):
super(SGRAF, self).__init__()
self.img_enc = EncoderImage(model_name, image_dim, embed_size, image_norm)
self.txt_enc = EncoderText(model_name, vocab_size, word_dim, embed_size, num_layers,
use_bi_gru=use_bi_gru, text_norm=text_norm)
self.sim_enc = EncoderSimilarity(embed_size, sim_dim, module_name, sgr_step)
self.criterion = ContrastiveLoss(margin=margin, max_violation=max_violation)
def forward_emb(self, batch):
images = batch['image_feat']
captions = batch['text_token']
lengths = batch['text_len']
img_embs = self.img_enc(images)
cap_embs = self.txt_enc(captions, lengths)
return img_embs, cap_embs, lengths
def forward_sim(self, batch):
img_embs, cap_embs, cap_lens = batch
sims = self.sim_enc(img_embs, cap_embs, cap_lens)
return sims
def forward(self, batch):
images = batch['image_feat']
captions = batch['text_token']
lengths = batch['text_len']
img_embs = self.img_enc(images)
cap_embs = self.txt_enc(captions, lengths)
sims = self.sim_enc(img_embs, cap_embs, lengths)
loss = self.criterion(sims)
return loss
@staticmethod
def cal_sim(model, img_embs, cap_embs, cap_lens, **kwargs):
shard_size = kwargs.get('shard_size', 100)
n_im_shard = (len(img_embs) - 1) // shard_size + 1
n_cap_shard = (len(cap_embs) - 1) // shard_size + 1
sims = np.zeros((len(img_embs), len(cap_embs)))
for i in range(n_im_shard):
im_start, im_end = shard_size * i, min(shard_size * (i + 1), len(img_embs))
for j in range(n_cap_shard):
ca_start, ca_end = shard_size * j, min(shard_size * (j + 1), len(cap_embs))
with paddle.no_grad():
im = paddle.to_tensor(img_embs[im_start:im_end], dtype='float32')
ca = paddle.to_tensor(cap_embs[ca_start:ca_end], dtype='float32')
l = paddle.to_tensor(cap_lens[ca_start:ca_end], dtype='int64')
sim = model.forward_sim((im, ca, l))
                    sims[im_start:im_end, ca_start:ca_end] = np.array(sim)
        return sims
#!/usr/bin/env python
## Copyright (c) 2009, <NAME>
## Original Matlab version of GC2D, <NAME>
## GC2D first converted to Python/NumPy in 2009 by <NAME>
################################################################
# NOTE: TopoFlow can provide "mass balance" for GC2D, but
# the timescales are very different. TopoFlow should
# pass some kind of "net" or cumulative "mass balance"
# to GC2D at its large timestep.
#
# NOTE: There is no "load_mask()" function yet, but it is
# called in a "try" block.
#
# NOTE: THERMAL_TOGGLE option does not work yet.
# See notes below regarding undefined vars.
#
# NOTE: Should carefully test update_vars() due to
# a bug fix and other changes to the code.
# Compare to update_vars_OLD().
#
# NOTE: Check that all "numpy" function calls include "numpy.".
# Fixed calls to "mean()", "nonzero()", "ravel()",
# abs() vs. absolute(), max(A,B) vs. maximum(A,B), etc.
#
################################################################
import numpy
import time
import sys
import logging
# import getopt
import scipy # scipy.signal.convolve, scipy.io.loadmat
from scipy import interpolate
from scipy import signal
# SDP. 10/24/11. No longer available. Deprecated?
# from scipy.io.numpyio import fwrite # used by print_watch_point()
#--------------------------------------------------------------------------------------------------
# run_model() # (for testing)
# ------------------------------
# Classes (used as structures)
# ------------------------------
# MassBalance
# BoundaryCond
# Parameters
# InputParams
# OutputParams
# Toggles
#
# -----------
# Functions
# -----------
# compress_grid()
# filter2d()
# add_halo()
# set_bc()
# difference_grid()
# basal_shear_stress()
# iceflow()
# ice_sliding()
# sum_ice_motion()
# avalanche()
# calve()
# mass_balance()
# mass_conservation()
# load_dem()
# load_dem_var()
# load_mask() ###### Not written, but called. #####
# get_timestep()
# update_vars()
# print_watch_point()
# update()
# init_valley_glacier()
# init_ice_sheet()
# resample_dem()
# init_ice_surface()
# load_state()
# #### load_state_old()
# #### run_for()
#--------------------------------------------------------------------------------------------------
def run_model(t_max=10.0, DEM_file='Animas_200.mat', SILENT=False):
Toggles.VARIABLE_DT_TOGGLE = 0 # (or change to 1)
###################################
print('Starting GC2D test run...')
print('Reading input file...')
( H, Zb, Zi, dx, dy ) = load_state(DEM_file=DEM_file,
RESTART_TOGGLE = 0,
INIT_COND_TOGGLE=1 )
ny, nx = Zb.shape
#------------------
# Initialize vars
#------------------
t = numpy.float64(0)
conserveIce = numpy.float64(0) # (total ice mass ??)
meltrate = numpy.zeros( (ny, nx), dtype='float64' )
## fd_watch = {}
## fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' )
## counter = 0
while (t < t_max):
(dt, t, H, Zi, meltrate, conserveIce) = update( t, H, Zb, dx, dy,
meltrate, conserveIce,
SILENT=SILENT)
## COMPRESS_TOGGLE = Toggles.COMPRESS_TOGGLE,
## ICEFLOW_TOGGLE = Toggles.ICEFLOW_TOGGLE,
## ICESLIDE_TOGGLE = Toggles.ICESLIDE_TOGGLE,
## VARIABLE_DT_TOGGLE = Toggles.VARIABLE_DT_TOGGLE,
## dtDefault=Parameters.dtDefault,
## dtMax=Parameters.dtMax)
#-----------------------
# Print a short report
#-----------------------
print(' ')
print('(nx, ny) =', nx, ny)
print('(dx, dy) =', dx, dy)
print('(Hmin, Hmax) =', H.min(), H.max())
print('(Zbmin, Zbmax) =', Zb.min(), Zb.max())
print('(Zimin, Zimax) =', Zi.min(), Zi.max())
print('(MRmin, MRmax) =', meltrate.min(), meltrate.max())
print('conserveIce =', conserveIce)
print('Finished.')
print(' ')
# run_model()
#--------------------------------------------------------------------------------------------------
class MassBalance: # (enumeration)
( BAD_VAL ,
ZERO_BALANCE ,
CONSTANT_ELA ,
ELA_LOWERING ,
ELA_TIME_SERIES ,
EXTERNAL_FUNC ,
ELA_LOWERING2 ,
BALANCE_FILE ,
D180_TIME_SERIES ) = list(range( 9))
# class MassBalance
#--------------------------------------------------------------------------------------------------
class BoundaryCond: # (enumeration)
( BAD_VAL ,
ICE_FREE_BOUND ,
ZERO_FLUX_BOUND ,
CONST_FLUX_BOUND ,
SURF_ELEV_BOUND ,
SURF_SLOPE_BOUND ) = list(range( 6))
# class BoundaryCond
#--------------------------------------------------------------------------------------------------
class Parameters: # (structure)
# Constants
    g = numpy.float64(9.81)                 # gravitational acceleration  [m/s**2]
rhoI = numpy.float64(917) # density of ice [kg/m**3]
rhoW = numpy.float64(1000) # density of water [kg/m**3]
day = numpy.float64(0.00274) # length of a day in years [years]
# Time
t = numpy.float64(0) # set time to zero
tMax = numpy.float64(100000) # maximum simulation time in years
dtMax = numpy.float64(0.4 * 365*day) # maximum timestep in years
dtDefault = dtMax # timestep if VARIABLE_DT_TOGGLE==0
sec_per_year = numpy.float64(3600) * 24 * 365 # (SDP, 9/30/09)
# Glacier Properties
MinGlacThick = numpy.float64(1)
# Ice Deformation
glensA = numpy.float64( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000
## glensA = numpy.float64( 6.8 * 3.15 * 1e-17)
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.float64(10)
taubChar = numpy.float64(100000)
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.float64(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.float64(20) # distance from ice surface to water table
MaxFloatFraction = numpy.float64(80) # limits water level in ice
Hpeff = numpy.float64(20) # effective pressure (meters of water)
# Mass Balance
initELA = numpy.float64(3350) # (valley glaciers, try 3500 ice sheets)
ELAStepSize = numpy.float64(-50)
ELAStepInterval = numpy.float64(500)
gradBz = numpy.float64(0.01)
maxBz = numpy.float64(2)
tmin = numpy.float64(200) # Years, spin-up time
# Avalanching
angleOfRepose = numpy.float64(30)
avalanchFreq = numpy.float64(3) # average number per year
# Calving
seaLevel = numpy.float64(-100) # meters
calvingCoef = numpy.float64(2) # year^-1
# Thermal
c = numpy.float64(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.float64(0.05 * 3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.float64(-0.0255) # Geothermal Gradient
# Only for Ice Sheets ???
Hbound = numpy.float64(2000)
Elev0 = numpy.float64(0) # reference elevation
To = numpy.float64(2.6) # temperature at Elev0
lapseRate = numpy.float64(-0.0065) # degrees per meter
# class Parameters
#--------------------------------------------------------------------------------------------------
class InputParams: # (structure)
CLEAR_FIGURE = 1
CONTOUR_INTERVAL = 50.
DEBUG_TOGGLE = 0
DT_LIMIT = 0
ELA_CONTOUR = 1.
ICE_CONTOUR = 1.
NEW_FIGURE = 0
QUIVER_VECS = 0
RECONSTRUCT = 0
SUBFIGURE = 0
THERMAL_CONTOUR = 0
# class InputParams
#--------------------------------------------------------------------------------------------------
class OutputParams: # (structure)
plotInterval = 60 * 120 # seconds
saveInterval = 100 # whole years
reportInterval = 30 # seconds
nextPlot = 0 # initialize to plot on first timestep
nextSave = 0 # initialize to save on first timestep
nextReport = 0 # initialize to report on first timestep
outputFile = 'savetmp'
# class OutputParams
#--------------------------------------------------------------------------------------------------
class Toggles: # (structure)
#------------------------
# Code behavior toggles
#-----------------------------------------------------------
# Toggle or turn on/off segments of the code or select
# between multiple possibilities for a given process.
# Values can be reset in INIT_COND segment.
# Note that many of these are unused in current version.
#-----------------------------------------------------------
GUISTART_TOGGLE = 0 # started simulation with the gui (off|on)
SAVE_TOGGLE = 1 # saving (off|on)
PLOT_TOGGLE = 1 # plotting (off|on)
REPORT_TOGGLE = 1 # reporting (off|on)
COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on)
VARIABLE_DT_TOGGLE = 0 # state dependent time step (off|on)
INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet)
GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on)
ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on)
ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select)
THERMAL_TOGGLE = 0 # temp dependance of flow (off|on)
FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on)
AVALANCHE_TOGGLE = 0 # avalanche off steep surfaces (off|on)
CALVING_TOGGLE = 0 # calving front (off|on)
ERODE_TOGGLE = 0 # erode the bed (off|on|select)
## CRN_TOGGLE = 0 # CRN accumulation (off|on)
# MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING # select climate scenerio (off|on|select)
MASS_BALANCE_TOGGLE = MassBalance.CONSTANT_ELA # select climate scenerio (off|on|select)
WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
# class Toggles
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def compress_grid( H , Zb , COMPRESS_TOGGLE=False , RESTART_TOGGLE=0,
THERMAL_TOGGLE=False ):
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
if (COMPRESS_TOGGLE) and (H.max() > 1) and (RESTART_TOGGLE != 2):
H_FullSpace = H.copy()
Zb_FullSpace = Zb.copy()
if (THERMAL_TOGGLE):
Ts_FullSpace = Ts.copy()
Tb_FullSpace = Tb.copy()
Tm_FullSpace = Tm.copy()
#[indrw,indcl] = find(H ~= 0);
indrw, indcl = numpy.where( H != 0 )
mxrw, mxcl = Zb.shape
mnrw = max( 0 , min(indrw) - 2 )
mxrw = min( mxrw , max(indrw) + 2 )
mncl = max( 0 , min(indcl) - 2 )
mxcl = min( mxcl , max(indcl) + 2 )
H = H [ mnrw:mxrw , mncl:mxcl ]
Zb = Zb[ mnrw:mxrw , mncl:mxcl ]
## Zi = Zb + max( H, 0 )
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
if (THERMAL_TOGGLE):
Ts = Ts[ mnrw:mxrw , mncl:mxcl ]
Tb = Tb[ mnrw:mxrw , mncl:mxcl ]
Tm = Tm[ mnrw:mxrw , mncl:mxcl ]
ny, nx = H.shape
mx_ny, mx_nx = Zb_FullSpace.shape
ny, nx = Zb.shape
compression_ratio = (mx_nx * mx_ny) / (nx * ny)
COMPRESSED_FLAG = 1
else:
## Zi = Zb + max( H, 0 ) # included for restarts
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
compression_ratio = 1.
COMPRESSED_FLAG = 0
return ( Zi , compression_ratio , COMPRESSED_FLAG )
# compress_grid()
#--------------------------------------------------------------------------------------------------
def filter2d( b , x , shape='same' ):
return scipy.signal.convolve( b , x , mode=shape )
# filter2d()
#--------------------------------------------------------------------------------------------------
def add_halo( x ):
x_ext = numpy.concatenate( ( x[:,0,numpy.newaxis] , x , x[:,-1,numpy.newaxis] ) , axis=1 )
x_ext = numpy.concatenate( ( [x_ext[0,:]] , x_ext , [x_ext[-1,:]] ) )
return x_ext
# add_halo()
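#   Example (illustrative): a 2x2 grid gains a one-cell halo of replicated
#   edge values, giving a 4x4 array:
#     add_halo(numpy.array([[1, 2], [3, 4]]))
#     -> [[1, 1, 2, 2],
#         [1, 1, 2, 2],
#         [3, 3, 4, 4],
#         [3, 3, 4, 4]]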
#--------------------------------------------------------------------------------------------------
def set_bc( H , Zb , Zi ,
THERMAL_TOGGLE = Toggles.THERMAL_TOGGLE,
WEST_BC_TOGGLE = Toggles.WEST_BC_TOGGLE,
EAST_BC_TOGGLE = Toggles.EAST_BC_TOGGLE,
SOUTH_BC_TOGGLE = Toggles.SOUTH_BC_TOGGLE,
NORTH_BC_TOGGLE = Toggles.NORTH_BC_TOGGLE ):
## WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ):
#-------------------------------------------------------
# MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
#-------------------------------------------------------
# DEFAULT BOUNDARY CONDITION IS ZERO FLUX
#-------------------------------------------------------
H_ext = add_halo( H )
Zb_ext = add_halo( Zb )
Zi_ext = add_halo( Zi )
if (THERMAL_TOGGLE):
Ts_ext = add_halo( Ts )
Tb_ext = add_halo( Tb )
Tm_ext = add_halo( Tm )
# WESTERN BOUNDARY CONDITION
if WEST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[:,0]) + Parameters.Hbound
H_ext[:,0] = ZiBound - Zb_ext[:,0]
elif WEST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif WEST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,0] = 2*Zi_ext[:,1] - Zi_ext[:,2]
H_ext [:,0] = Zi_ext[:,0] - Zb_ext[:,0]
H_ext [:,0] = numpy.maximum( H_ext[:,0], 0 )
elif WEST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,0] = 0
# EASTERN BOUNDARY CONDITION
if EAST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[:,-1]) + Parameters.Hbound
H_ext[:,-1] = ZiBound - Zb_ext[:,-1]
elif EAST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif EAST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,-1] = 2*Zi_ext[:,-2] - Zi_ext[:,-3]
H_ext [:,-1] = Zi_ext[:,-1] - Zb_ext[:,-1]
H_ext [:,-1] = numpy.maximum( H_ext[:,-1], 0)
elif EAST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,-1] = 0
# SOUTHERN BOUNDARY CONDITION
if SOUTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[0,:]) + Parameters.Hbound
H_ext[0,:] = ZiBound - Zb_ext[0,:]
elif SOUTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif SOUTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[0,:] = 2*Zi_ext[1,:] - Zi_ext[2,:]
H_ext [0,:] = Zi_ext[0,:] - Zb_ext[0,:]
H_ext [0,:] = numpy.maximum( H_ext[0,:], 0 )
elif SOUTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[0,:] = 0
# NORTHERN BOUNDARY CONDITION
if NORTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
        ZiBound = numpy.mean(Zb[-1,:]) + Parameters.Hbound
H_ext[-1,:] = ZiBound - Zb_ext[-1,:]
elif NORTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif NORTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[-1,:] = 2*Zi_ext[-2,:] - Zi_ext[-3,:]
H_ext [-1,:] = Zi_ext[-1,:] - Zb_ext[-1,:]
H_ext [-1,:] = numpy.maximum( H_ext[-1,:], 0 )
elif NORTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[-1,:] = 0
Zi_ext = Zb_ext + H_ext
return ( H_ext , Zb_ext , Zi_ext )
# set_bc()
#--------------------------------------------------------------------------------------------------
def difference_grid( A , dx , dy ):
dAdx_ext = ( A[:,1:] - A[:,:-1] ) / dx
dAdy_ext = ( A[1:,:] - A[:-1,:] ) / dy
dAdx = dAdx_ext[1:-1,:]
dAdy = dAdy_ext[:,1:-1]
return ( dAdx , dAdy )
# difference_grid()
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def basal_shear_stress( H_ext , Zi_ext , dx=1. , dy=1. ,
g=Parameters.g , rhoI=Parameters.rhoI ):
#------------------------------------
# CALCULATE THE BASAL SHEAR STRESS
#------------------------------------
# forward differences (could use difference_grid())
dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx
dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy
dZidxX = dZidxX_ext[1:-1,:]
dZidyY = dZidyY_ext[:,1:-1]
HX_ext = ( H_ext[:,1:] + H_ext[:,:-1] ) / 2.
HY_ext = ( H_ext[1:,:] + H_ext[:-1,:] ) / 2.
HX = HX_ext[1:-1,:]
HY = HY_ext[:,1:-1]
taubxX_ext = -rhoI * g * HX_ext * dZidxX_ext
taubyY_ext = -rhoI * g * HY_ext * dZidyY_ext
taubxX = taubxX_ext[1:-1,:]
taubyY = taubyY_ext[:,1:-1]
taubxY = ( taubxX_ext[:-1,:-1] + taubxX_ext[:-1,1:] +
taubxX_ext[1: ,:-1] + taubxX_ext[1: ,1:] ) / 4.
taubyX = ( taubyY_ext[:-1,:-1] + taubyY_ext[:-1,1:] +
taubyY_ext[1: ,:-1] + taubyY_ext[1: ,1:] ) / 4.
taubX = numpy.sqrt( taubxX**2 + taubyX**2 )
taubY = numpy.sqrt( taubxY**2 + taubyY**2 )
taubX = numpy.choose( HX>0 , (0,taubX) )
taubY = numpy.choose( HY>0 , (0,taubY) )
    # Zero the unit-direction components where taub is ~0 to avoid dividing
    # by zero (note: the quotient is still evaluated everywhere, so numpy may
    # emit runtime warnings where taub == 0)
    xcmpnt = numpy.choose( numpy.abs(taubX)<1e-5 , ( taubxX / taubX , 0. ) )
    ycmpnt = numpy.choose( numpy.abs(taubY)<1e-5 , ( taubyY / taubY , 0. ) )
return ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) )
# basal_shear_stress()
#--------------------------------------------------------------------------------------------------
def iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt ,
THERMAL_TOGGLE = Toggles.THERMAL_TOGGLE,
## THERMAL_TOGGLE=False,
glensA = Parameters.glensA,
#----------------------------------------------
# Remaining values for THERMAL_TOGGLE = True
#----------------------------------------------
MinGlacThick = Parameters.MinGlacThick,
lapseRate = Parameters.lapseRate ): # (value for ice sheets ???)
## MinGlacThick = 1.0,
## lapseRate = numpy.float64(-0.0065)): # (value for ice sheets ???)
#--------------------------------------------
# CALCULATE ICE VELOCITY DUE TO DEFORMATION
#--------------------------------------------
if (THERMAL_TOGGLE):
##################################################################
# NOTE! Many of the vars needed by this segment are undefined,
# such as: lapseRate (added above), eHs, eTs, eTm, To,
# H_ext, Ts_ext and Tm_ext. (SDP, 9/21/09)
##################################################################
A_ext = numpy.zeros(H_ext.shape , dtype='float64' )
ind = numpy.nonzero( numpy.ravel(H_ext) >= MinGlacThick )
Ts_ext = To + lapseRate*( Zi_ext - Elev0 )
#A_ext(ind) = interp3( eHs, eTs, eTm, eA, H_ext(ind), Ts_ext(ind), Tm_ext(ind) ) ;
try:
numpy.put( A_ext , ind , interpolate.interp3d( eHs , eTs , eTm )( numpy.take(H_ext,ind) , numpy.take(Ts_ext,ind) , numpy.take(Tm_ext,ind) ) )
except:
logging.error( "NaN in A, likely H_node exceeds H_glens limits" )
return -1
AX = ( A_ext[1:-1, :-1] + A_ext[1:-1,1: ] ) / 2.
AY = ( A_ext[ :-1,1:-1] + A_ext[1: ,1:-1] ) / 2.
else:
AX = glensA
AY = glensA
# Here's the guts of calculating the depth averaged velocity
UdxX = numpy.abs( .4 * AX * taubX*taubX*taubX * HX ) * xcmpnt
UdyY = numpy.abs( .4 * AY * taubY*taubY*taubY * HY ) * ycmpnt
#UdxX = numpy.fix(UdxX*1e6)*1e-6
#UdyY = numpy.fix(UdyY*1e6)*1e-6
return ( UdxX , UdyY )
# iceflow()
#--------------------------------------------------------------------------------------------------
def ice_sliding( taubX , taubY , xcmpnt , ycmpnt ,
THERMAL_TOGGLE=False,
FREEZEON_TOGGLE=False,
UsChar=Parameters.UsChar,
taubChar=Parameters.taubChar ):
#------------------------------
# CALCULATE SLIDING VELOCITY
#------------------------------
# Here's the guts of calculating the sliding velocity
UsxX = numpy.choose( numpy.abs(taubX)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubX) * xcmpnt ,
UsChar * numpy.exp(1 - taubChar ) * xcmpnt ) )
UsyY = numpy.choose( numpy.abs(taubY)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubY) * ycmpnt ,
UsChar * numpy.exp(1 - taubChar ) * ycmpnt ) )
if (THERMAL_TOGGLE and FREEZEON_TOGGLE):
##################################################################
# NOTE! Many of the vars needed by this segment are undefined,
# such as: Tb_ext, Zb_ext, seaLevel. (SDP, 9/21/09)
##################################################################
## notFrozen = (Tb_ext > -.5) or (Zb_ext < seaLevel)
notFrozen = numpy.logical_or( Tb_ext > -0.5, Zb_ext < seaLevel )
notFrozenX = ( notFrozen[1:-1, :-1] + notFrozen[1:-1,1: ] ) / 2.
notFrozenY = ( notFrozen[ :-1,1:-1] + notFrozen[1: ,1:-1] ) / 2.
UsxX *= notFrozenX
UsyY *= notFrozenY
return ( UsxX , UsyY )
# ice_sliding()
#--------------------------------------------------------------------------------------------------
def sum_ice_motion( UdxX , UdyY , UsxX , UsyY ):
UxX = (UdxX + UsxX)
UyY = (UdyY + UsyY)
return ( UxX , UyY )
# sum_ice_motion()
#--------------------------------------------------------------------------------------------------
def avalanche( H , dx=1. , angleOfRepose=Parameters.angleOfRepose ):
    #---------------------------------------
    #  AVALANCHE SNOW OFF OF STEEP SURFACES
    #---------------------------------------------------------
    # move ice downslope until the ice surface is everywhere
    # less than or near the angle of repose
    #---------------------------------------------------------
    # NOTE: dx is now passed in as an argument (the original referenced it as
    # a global), and Zb.shape was a bug since only H is in scope here.
    ny, nx = H.shape
    dHRepose = dx * numpy.tan(angleOfRepose * numpy.pi / 180.)
from typing import List, Any
import numpy as np
from monai.data import NibabelReader
import matplotlib.pyplot as plt
from numpy import ndarray
from pathlib import Path
import seaborn as sns
from monai.transforms import apply_transform
from utils.transforms import get_modality_img_transforms
plt.rcParams["figure.figsize"] = (8.0, 12.0)
PATH = Path("/home/jq/Desktop/rUnet/data/BraTS")
def plot_slices(
processed_preds: List[ndarray],
processed_targets: List[ndarray],
clip_min: float,
clip_max: float,
) -> Any:
fig: plt.Figure
axes: plt.Axes
fig, axes = plt.subplots(nrows=len(processed_preds), ncols=2)
maes, maes_255, masked_maes, masked_255_maes = [], [], [], []
max_value = 0
min_value = 255
for i, (pred, targ) in enumerate(zip(processed_preds, processed_targets)):
mask = targ == 0
pred_255 = np.clip(pred, -clip_min, clip_max)
targ_255 = np.clip(targ, -clip_min, clip_max)
min_pred = min(-clip_min, np.min(pred))
min_targ = min(-clip_min, np.min(targ))
pred_255 = np.floor(255 * ((pred_255 - min_pred) / (clip_max - min_pred)))
targ_255 = np.floor(255 * ((targ_255 - min_targ) / (clip_max - min_targ)))
max_value = max(max(max_value, np.max(pred_255)), np.max(targ_255))
min_value = min(min(min_value, np.min(pred_255)), np.min(targ_255))
pred_255[mask] = 0
targ_255[mask] = 0
mae_255 = np.mean(np.abs(pred_255.ravel() - targ_255.ravel()))
mae = np.mean(np.abs(pred.ravel() - targ.ravel()))
masked_mae = np.mean(np.abs(pred[~mask].ravel() - targ[~mask].ravel()))
masked_255_mae = np.mean(np.abs(pred_255[~mask].ravel() - targ_255[~mask].ravel()))
maes.append(mae)
maes_255.append(mae_255)
masked_maes.append(masked_mae)
masked_255_maes.append(masked_255_mae)
mae_str = "{:1.2f}".format(float(np.round(mae, 2)))
mae_255_str = "{:1.2f}".format(float(np.round(mae_255, 2)))
mask_str = "{:1.2f}".format(float(np.round(masked_mae, 2)))
mask_255_str = "{:1.2f}".format(float(np.round(masked_255_mae, 2)))
axes[i][0].imshow(pred_255, cmap="Greys")
        axes[i][0].set_title(
            f"Predicted (MAE={mae_str}, MAE_255={mae_255_str})\n"
            f"masked: (MAE={mask_str}, MAE_255={mask_255_str})\n"
            f"clip_max: {clip_max}, clip_min: {clip_min}",
            {"fontsize": 6},
        )
axes[i][1].imshow(targ_255, cmap="Greys")
axes[i][1].set_title("Target", {"fontsize": 8})
mae_clean = "{:1.2f}".format(float(np.round(np.mean(maes), 2)))
mae_255_clean = "{:1.2f}".format(float(np.round(np.mean(maes_255), 2)))
mask_clean = "{:1.2f}".format(float(np.round(np.mean(masked_maes), 2)))
    mask_255_clean = "{:1.2f}".format(float(np.round(np.mean(masked_255_maes), 2)))
#[2020]-"A new fusion of grey wolf optimizer algorithm with a two-phase mutation for feature selection"
import numpy as np
from numpy.random import rand
from FS.functionHO import Fun
def init_position(lb, ub, N, dim):
X = np.zeros([N, dim], dtype='float')
for i in range(N):
for d in range(dim):
X[i,d] = lb[0,d] + (ub[0,d] - lb[0,d]) * rand()
return X
def binary_conversion(X, thres, N, dim):
Xbin = np.zeros([N, dim], dtype='int')
for i in range(N):
for d in range(dim):
if X[i,d] > thres:
Xbin[i,d] = 1
else:
Xbin[i,d] = 0
return Xbin
def boundary(x, lb, ub):
if x < lb:
x = lb
if x > ub:
x = ub
return x
#--- transfer function update binary position (4.3.2)
def transfer_function(x):
Xs = abs(np.tanh(x))
return Xs
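# Illustrative: the tanh transfer maps any real velocity into [0, 1), e.g.
# transfer_function(0.0) == 0.0 and transfer_function(2.5) ~= 0.987, so larger
# displacements push the transfer value toward 1.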
def jfs(xtrain, ytrain, opts):
# Parameters
ub = 1
lb = 0
thres = 0.5
Mp = 0.5 # mutation probability
N = opts['N']
max_iter = opts['T']
if 'Mp' in opts:
Mp = opts['Mp']
# Dimension
dim = np.size(xtrain, 1)
if np.size(lb) == 1:
ub = ub * np.ones([1, dim], dtype='float')
lb = lb * np.ones([1, dim], dtype='float')
# Initialize position
X = init_position(lb, ub, N, dim)
#--- Binary conversion
X = binary_conversion(X, thres, N, dim)
# Fitness at first iteration
fit = np.zeros([N, 1], dtype='float')
Xalpha = np.zeros([1, dim], dtype='int')
Xbeta = np.zeros([1, dim], dtype='int')
Xdelta = np.zeros([1, dim], dtype='int')
Falpha = float('inf')
Fbeta = float('inf')
Fdelta = float('inf')
for i in range(N):
fit[i,0] = Fun(xtrain, ytrain, X[i,:], opts)
if fit[i,0] < Falpha:
Xalpha[0,:] = X[i,:]
Falpha = fit[i,0]
if fit[i,0] < Fbeta and fit[i,0] > Falpha:
Xbeta[0,:] = X[i,:]
Fbeta = fit[i,0]
if fit[i,0] < Fdelta and fit[i,0] > Fbeta and fit[i,0] > Falpha:
Xdelta[0,:] = X[i,:]
Fdelta = fit[i,0]
# Pre
curve = np.zeros([1, max_iter], dtype='float')
t = 0
curve[0,t] = Falpha.copy()
print("Iteration:", t + 1)
print("Best (TMGWO):", curve[0,t])
t += 1
while t < max_iter:
# Coefficient decreases linearly from 2 to 0 (3.5)
a = 2 - t * (2 / max_iter)
for i in range(N):
for d in range(dim):
# Parameter C (3.4)
C1 = 2 * rand()
C2 = 2 * rand()
C3 = 2 * rand()
# Compute Dalpha, Dbeta & Ddelta (3.7 - 3.9)
Dalpha = abs(C1 * Xalpha[0,d] - X[i,d])
Dbeta = abs(C2 * Xbeta[0,d] - X[i,d])
Ddelta = abs(C3 * Xdelta[0,d] - X[i,d])
# Parameter A (3.3)
A1 = 2 * a * rand() - a
A2 = 2 * a * rand() - a
A3 = 2 * a * rand() - a
# Compute X1, X2 & X3 (3.7 -3.9)
X1 = Xalpha[0,d] - A1 * Dalpha
X2 = Xbeta[0,d] - A2 * Dbeta
X3 = Xdelta[0,d] - A3 * Ddelta
# Update wolf (3.6)
Xn = (X1 + X2 + X3) / 3
#--- transfer function
Xs = transfer_function(Xn)
#--- update position (4.3.2)
if rand() < Xs:
X[i,d] = 0
else:
X[i,d] = 1
# Fitness
for i in range(N):
fit[i,0] = Fun(xtrain, ytrain, X[i,:], opts)
if fit[i,0] < Falpha:
Xalpha[0,:] = X[i,:]
Falpha = fit[i,0]
if fit[i,0] < Fbeta and fit[i,0] > Falpha:
Xbeta[0,:] = X[i,:]
Fbeta = fit[i,0]
if fit[i,0] < Fdelta and fit[i,0] > Fbeta and fit[i,0] > Falpha:
Xdelta[0,:] = X[i,:]
Fdelta = fit[i,0]
curve[0,t] = Falpha.copy()
print("Iteration:", t + 1)
print("Best (TMGWO):", curve[0,t])
t += 1
#--- two phase mutation: first phase
# find index of 1
idx = np.where(Xalpha == 1)
idx1 = idx[1]
    Xmut1 = np.zeros([1, dim], dtype='int')
#!/usr/bin/env python3
import numpy as np
import argparse
from scipy.integrate import odeint as integrate
from matplotlib import pyplot as plot
from numpy.linalg import norm
from mpl_toolkits.mplot3d import Axes3D
parser = argparse.ArgumentParser()
# Ball parameters
constants = parser.add_argument_group("Constants")
constants.add_argument("-m", "--mass", default=0.04593, help="Mass of ball (kg)")
constants.add_argument("-r", "--radius", default=0.04267/2, help="Radius of ball (m)")
constants.add_argument("-i", "--inertia", type=float, default=9.145e-6, help="Inertia of golf ball")
constants.add_argument("--clubmass", type=float, default=0.2, help="Mass of club head (kg)")
constants.add_argument("-g", "--gravity", type=float, default=9.81, help="For when we get a Mars base (m/s/s)")
constants.add_argument("-d", "--density", type=float, default=1.225, help="Density of air (kg m^-3)")
constants.add_argument("--viscosity", type=float, default=1.46e-5, help="Kinematic viscosity of air")
# Initial parameters
initialparams = parser.add_argument_group("Initial parameters")
initialparams.add_argument("-yi", "--height", type=float, default=0, help="Initial height (m)")
initialparams.add_argument("--vclub", type=float, default=51.4, help="Club speed (m/s)")
# Loft angle
loftangleparams = parser.add_argument_group("Loft angle parameters")
loftangleparams.add_argument("-li", "--loftinitial", type=float, default=10, help="Loft angle (initial)")
loftangleparams.add_argument("-lf", "--loftfinal", type=float, default=35, help="Loft angle (final)")
loftangleparams.add_argument("-st", "--step", type=float, default=5, help="Loft angle (step)")
# Debugging
parser.add_argument("-v", "--verbose", action="store_true")
# Parse args
args = parser.parse_args()
# Input validation
assert args.loftfinal > args.loftinitial, "Final loft angle must be greater than initial loft angle!"
assert args.step != 0, "Step must be non-zero!"
assert ((args.loftfinal - args.loftinitial) / args.step).is_integer(), "Step size must divide the change in loft angle!"
assert args.mass != 0, "Mass must be non-zero."
assert args.radius != 0, "Radius must be non-zero."
assert args.viscosity != 0, "Kinematic viscosity must be non-zero."
assert args.density != 0, "Density of air must be non-zero."
g = args.gravity
density = args.density
# Ball speed from club speed and loft angle
def ball_speed(theta):
theta = np.radians(theta)
e = 0.86 - 0.0029 * args.vclub * np.cos(theta)
bfn = (1 + e) * args.vclub * np.cos(theta) / (1 + args.mass / args.clubmass)
bfp = args.vclub * np.sin(theta) / (1 + args.mass / args.clubmass + (args.mass * args.radius**2 / args.inertia))
return np.sqrt(bfn**2 + bfp**2)
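# Sanity check (illustrative): with the default 51.4 m/s club speed and a 10
# degree loft, ball_speed returns roughly 70.5 m/s -- a smash factor of ~1.37.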
# Spin
def ball_spin(theta):
    theta = np.radians(theta)
# add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python');
sys.path.append('../library')
from subprocess import call
import argparse
import os.path
#Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure
#Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods
#parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
help='List of moving images, seperated by space.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
help='List of target images, seperated by space.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
help='List of registration output prefixes for every moving/target image pair, seperated by space. Preferred to be a directory (e.g. /some_path/output_dir/)')
parser.add_argument('--samples', type=int, default=50, metavar='N',
                    help='number of times to sample the network (default: 50)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
help='number of GPUs used for prediction (default: 1). For maximum efficiency please set the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
help="network parameters for the prediction network")
args = parser.parse_args()
# check validity of input arguments from command line
def check_args(args):
# number of input images/output prefix consistency check
n_moving_images = len(args.moving_image)
n_target_images = len(args.target_image)
n_output_prefix = len(args.output_prefix)
if (n_moving_images != n_target_images):
print('The number of moving images is not consistent with the number of target images!')
sys.exit(1)
elif (n_moving_images != n_output_prefix ):
print('The number of output prefix is not consistent with the number of input images!')
sys.exit(1)
# number of GPU check (positive integers)
if (args.n_GPU <= 0):
print('Number of GPUs must be positive!')
sys.exit(1)
# geodesic shooting step check (positive integers)
if (args.shoot_steps < 0):
print('Shooting steps (--shoot-steps) is negative. Using model default step.')
# geodesic shooting step check (positive integers)
    if (args.samples < 1):
        print('Number of samples (--samples) must be at least 1.')
#enddef
def create_net(args, network_config):
net_single = prediction_network.net(network_config['network_feature']).cuda();
net_single.load_state_dict(network_config['state_dict'])
if (args.n_GPU > 1) :
device_ids=range(0, args.n_GPU)
net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
else:
net = net_single
net.train()
return net;
#enddef
def preprocess_image(image_pyca, histeq):
image_np = common.AsNPCopy(image_pyca)
nan_mask = np.isnan(image_np)
image_np[nan_mask] = 0
image_np /= np.amax(image_np)
# perform histogram equalization if needed
if histeq:
image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])
return image_np
#perform deformation prediction
def predict_image(args):
if (args.use_CPU_for_shooting):
mType = ca.MEM_HOST
else:
mType = ca.MEM_DEVICE
# load the prediction network
predict_network_config = torch.load(args.prediction_parameter)
prediction_net = create_net(args, predict_network_config);
batch_size = args.batch_size
patch_size = predict_network_config['patch_size']
input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()
# start prediction
for i in range(0, len(args.moving_image)):
common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
if (args.affine_align):
# Perform affine registration to both moving and target image to the ICBM152 atlas space.
# Registration is done using Niftireg.
call(["reg_aladin",
"-noSym", "-speeeeed", "-ref", args.atlas ,
"-flo", args.moving_image[i],
"-res", args.output_prefix[i]+"moving_affine.nii",
"-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
call(["reg_aladin",
"-noSym", "-speeeeed" ,"-ref", args.atlas ,
"-flo", args.target_image[i],
"-res", args.output_prefix[i]+"target_affine.nii",
"-aff", args.output_prefix[i]+'target_affine_transform.txt'])
moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
else:
moving_image = common.LoadITKImage(args.moving_image[i], mType)
target_image = common.LoadITKImage(args.target_image[i], mType)
#preprocessing of the image
moving_image_np = preprocess_image(moving_image, args.histeq);
target_image_np = preprocess_image(target_image, args.histeq);
grid = moving_image.grid()
moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
target_image_processed = common.ImFromNPArr(target_image_np, mType)
moving_image.setGrid(grid)
target_image.setGrid(grid)
predict_transform_space = False
if 'matlab_t7' in predict_network_config:
predict_transform_space = True
# run actual prediction
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 = prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi = common.AsNPCopy(registration_result['phiinv'])
phi_square = np.power(phi,2)
for sample_iter in range(1, args.samples):
print(sample_iter)
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 += prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi += common.AsNPCopy(registration_result['phiinv'])
phi_square += np.power(common.AsNPCopy(registration_result['phiinv']),2)
m0_mean = np.divide(m0, args.samples);
m0_reg = common.FieldFromNPArr(m0_mean, mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi_mean = registration_result['phiinv']
        phi_var = np.divide(phi_square, args.samples)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os, json
import games
import time
import utils.miscellaneous
from models.mlp import MLP
from models.echo_state_network import EchoState
from models.random import Random
from models.learned_dqn import LearnedDQN
from models.learned_ddpg import LearnedDDPG
def bar_plot(values, evals, game):
fig, ax = plt.subplots(figsize=(6, 7))
bar_width = 0.3
opacity = 0.5
for index, (name, value) in enumerate(values):
rects = plt.bar(index, value, bar_width,
alpha=opacity,
color='b',
label=name,
align='center')
autolabel(rects, ax)
ylim = get_y_lim_for_game(game)
plt.ylim([0, ylim])
plt.gca().axes.set_xticklabels([])
plt.ylabel('AVG fitness')
plt.title('Model comparison - {} runs - {}'.format(evals, game))
x = np.arange(len(values))
ax.set_xticks(x)
ax.set_xticklabels([name for (name, _) in values])
plt.tight_layout()
plt.savefig('comparison.png')
def get_y_lim_for_game(game):
ylim = None
if game == "alhambra":
ylim = 200
if game == "torcs":
ylim = 3000
if game == "mario":
ylim = 1.2
if game == "2048":
ylim = 5000
return ylim
def autolabel(rects, ax):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'{}'.format(float(height)),
ha='center', va='bottom')
def eval(game, evals, model):
parameters = [model, evals, np.random.randint(0, 2 ** 16)]
values = []
game_instance = utils.miscellaneous.get_game_instance(game, parameters, test=True)
results = game_instance.run(advanced_results=True)
for i, r in enumerate(results):
if i > 0:
values.append(("original#{}".format(i), r))
else:
values.append((model.get_name(), r))
return values
def compare_models(game, evals, *args):
print("Comparing models:")
values = []
for model in args:
print(model.get_name())
values += eval(game=game, evals=evals, model=model)
bar_plot(values, evals, game)
return values
def eval_mario_winrate(model, evals, level, vis_on):
"""
Evaluates mario winrate on specified level.
:param model:
:param evals:
:param level: gombas or spikes
:param vis_on:
:return:
"""
game_instance = games.mario.Mario(model, evals, np.random.randint(0, 2 ** 16), level=level, vis_on=vis_on,
use_visualization_tool=True)
results = game_instance.run(advanced_results=True)
print("Mario winrate (avg dist): {}".format(results))
return results
def run_torcs_vis_on(model, evals):
game_instance = games.torcs.Torcs(model, evals, np.random.randint(0, 2 ** 16), vis_on=True)
print("Torcs visualization started.")
results = game_instance.run(advanced_results=True)
def run_2048_extended(model, evals):
print("Game 2048 with extended logs started.")
game_instance = games.game2048.Game2048(model, evals, np.random.randint(0, 2 ** 16))
results = game_instance.run(advanced_results=True)
return results
def run_random_model(game, evals):
print("Generating graph of 'random' model for game {}.".format(game))
results = []
t = time.time()
for i in range(evals):
if time.time() - t > 1 or i == evals - 1:
print("{}/{}".format(i + 1, evals))
t = time.time()
parameters = [Random(game), 1, np.random.randint(0, 2 ** 16)]
game_instance = utils.miscellaneous.get_game_instance(game, parameters)
result = game_instance.run()
results.append(result)
x = range(0, evals)
    # plt.plot(x, results, 'b', x, [np.mean(results) for _ in results], 'r--')
    plt.scatter(x, results, c='b')
plt.plot([np.mean(results) for _ in results], 'r--')
plt.title("Random - game: {} - Average score: {}".format(game, np.mean(results)))
plt.ylim(0, get_y_lim_for_game(game))
plt.xlim(0, evals)
plt.xlabel("Evals")
plt.ylabel("Score")
plt.savefig("random_model_{}.png".format(game))
def eval_alhambra_winrate(model, evals):
print("Evaluating Alhambra winrate.")
wins = [0, 0, 0]
for i in range(evals):
print("{}/{}".format(i + 1, evals))
game_instance = games.alhambra.Alhambra(model, 1, np.random.randint(0, 2 ** 16))
result = game_instance.run(advanced_results=True)
wins[np.argmax(result)] += 1
print("Alhambra winrate: {}% | {}% | {}%".format(100 * wins[0] / evals,
100 * wins[1] / evals,
100 * wins[2] / evals, ))
def eval_alhambra_avg_score(model, evals):
game_instance = games.alhambra.Alhambra(model, evals, np.random.randint(0, 2 ** 16))
result = game_instance.run(advanced_results=True)
return result
# INFERENCE METHOD
def run_model_evaluator():
"""
Used for evaluating learned models, to benchmark them and get avg results.
For example, to run 1000 games and plot results.
Set file_name for example as "C:/Users/Jan/Documents/GitHub/general-ai/Experiments/ESN+EA/torcs/logs_2017-06-20_19-09-27/best/best_0.json" (the whole path)
for evolutionary based experiments. For deep reinforcement (DQN or DDPG) based techniques use logdir, for example as:
"C:/Users/Jan/Documents/GitHub/general-ai/Experiments/DQN/mario/logs_2017-04-19_16-26-07", where directory stores config files (model settings), model checkpoint etc.
Then feel free to use some of the prepared "test functions". Result (if exists) is written to same directory as this file (e.q. /utils).
"""
np.random.seed(930615)
# Before using game 2048, check it's encoding
game = "2048"
evals = 1000
# SELECT FILE (direct model for evolutionary or directory for reinforcement)
file_name = "C:/Users/Jan/Documents/GitHub/general-ai/Experiments/MLP+ES/2048/logs_2017-02-21_17-24-07/best/best_0.json"
# logdir = "C:/Users/Jan/Documents/GitHub/general-ai/Experiments/DDPG/torcs/logs_2017-04-29_11-39-44"
# SELECT MODEL (trained, based on file selected)
# esn = EchoState.load_from_file(file_name, game)
mlp = MLP.load_from_file(file_name, game)
# random = Random(game)
# ddpg = LearnedDDPG(logdir)
# dqn = LearnedDQN(logdir)
# RUN MODEL TEST
# eval_alhambra_winrate(mlp, evals)
# run_random_model(game, evals)
run_2048_extended(mlp, evals)
# eval_mario_winrate(model=dqn, evals=evals, level="spikes", vis_on=False)
# run_torcs_vis_on(model=ddpg, evals=evals)
# general model comparison (graph of score)
# compare_models(game, evals, ddpg)
"""
NOTE: Selected file source file, selected model (python object) and the game must be correct (must match). If you save model for
game 2048 using ESN, you can't load this model as DDPG for TORCS of course.
"""
def run_avg_results():
"""
Useful for generating average results for more experiments (logs).
"""
# List of logs to be measured (tested)
items = ["logs_2017-06-23_14-16-00",
"logs_2017-06-23_14-16-59",
"logs_2017-06-23_14-17-58",
"logs_2017-06-23_14-18-48",
"logs_2017-06-23_14-19-39"]
results = []
game = "2048"
evals = 1000
for item in items:
prefix = "C:/Users/Jan/Documents/GitHub/general-ai/Experiments/best_models_repeats/2048/MLP+ES/"
postfix = "/best/best_0.json"
file_name = prefix + item + postfix
logdir = prefix + item
# SELECT PROPER MODEL
model = MLP.load_from_file(file_name, game)
# model = EchoState.load_from_file(file_name, game)
# RUN MODEL
# 2048
result = run_2048_extended(model, evals)
# MARIO
# result = eval_mario_winrate(model=model, evals=evals, level="spikes", vis_on=False)
# ALHAMBRA
# First element is result of our model (rest are original models from previous work)
# result = eval_alhambra_avg_score(model, evals)[0]
# TORCS
# For reinforcement learning, please run model separately (tensorflow needs to be restarted)
results.append(result)
results = np.array(results)
file_name = "{}_stats_{}.txt".format(game, utils.miscellaneous.get_pretty_time())
with open(file_name, "w") as f:
f.write("--GAME {} STATISTICS-- {} trainings of the same model".format(game.upper(), len(items)))
f.write(os.linesep)
f.write("Model: {}".format(model.get_name()))
f.write(os.linesep)
f.write("Total games: {} (for each model)".format(evals))
f.write(os.linesep)
f.write("MAX TEST: {}".format(np.max(results)))
f.write(os.linesep)
f.write("AVG TEST: {}".format(np.mean(results)))
f.write(os.linesep)
f.write("MIN TEST: {}".format( | np.min(results) | numpy.min |
# @Author: guanwanxian
# @Date: 1970-01-01T08:00:00+08:00
# @Email: <EMAIL>
# @Last modified by: guanwanxian
# @Last modified time: 2017-02-25T20:57:35+08:00
"""Draw2DTools, providing functions to generate 2d images
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
from Utils import joinPath
from PlotTools import createNewAxes
from ImageUtils import ImgType, OpType
import Draw2DTools as draw2d
class MyLine(lines.Line2D):
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
self.text = mtext.Text(0, 0, '')
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position((x[-1], y[-1]))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
def generateLines(img_type):
if img_type==ImgType.NonBranch:
neighbor_number = np.random.randint(1,3) # 1~2
else:
neighbor_number = np.random.randint(3,5) # 3~4
lines_array = []
    center_r = np.random.randint(0, 4) # 0~3
center_angle = np.random.randint(0, 360)
x0 = int(center_r * np.cos(np.pi * center_angle/180))
y0 = int(center_r * np.sin(np.pi * center_angle/180))
for i in range(neighbor_number):
neighbor_r = np.random.randint(6, 11) # 6~10
neighbor_angle = np.random.randint(0, 360)
x1 = int(neighbor_r * np.cos(np.pi * neighbor_angle/180))
y1 = int(neighbor_r * np.sin(np.pi * neighbor_angle/180))
# print x0, y0, x1, y1, neighbor_angle
lines_array.append(MyLine([x0, x1], [y0, y1], lw=20, c='k'))
return lines_array
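# A minimal usage sketch of generateLines (added for illustration; it uses
# plain matplotlib axes rather than the project's PlotTools.createNewAxes
# helper, whose signature is not shown here):
def previewLines(img_type=ImgType.NonBranch):
    fig, ax = plt.subplots()
    for line in generateLines(img_type):
        ax.add_line(line)
    ax.set_xlim(-12, 12)  # neighbor radius is at most 10
    ax.set_ylim(-12, 12)
    ax.set_aspect('equal')
    plt.show()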
def generateLines2(img_type):
lines_array = []
x0 = int(1 * np.cos(np.pi * 0/180))
y0 = int(1 * np.sin(np.pi * 0/180))
x1 = int(5 * np.cos(np.pi * 180/180))
y1 = int(5 * np.sin(np.pi * 180/180))
lines_array.append(MyLine([x0, x1], [y0, y1], lw=5, c='k'))
x1 = int(5 * np.cos(np.pi * 270/180))
y1 = int(5 * np.sin(np.pi * 270/180))
lines_array.append(MyLine([x0, x1], [y0, y1], lw=5, c='k'))
x1 = int(5 * np.cos(np.pi * 45/180))
y1 = int(5 * np.sin(np.pi * 45/180))
lines_array.append(MyLine([x0, x1], [y0, y1], lw=5, c='k'))
return lines_array
def generateLines3(img_type):
if img_type==ImgType.NonBranch:
neighbor_number = np.random.randint(1,3) # 1~2
else:
neighbor_number = np.random.randint(3,5) # 3~4
margin = int(360/neighbor_number)
print('margin: ', margin)
lines_array = []
    center_r = np.random.randint(0, 4) # 0~3
center_angle = np.random.randint(0, 360)
x0 = int(center_r * np.cos(np.pi * center_angle/180))
y0 = int(center_r * np.sin(np.pi * center_angle/180))
for i in range(neighbor_number):
        neighbor_r = np.random.randint(6, 11) # 6~10
        # assumption: unlike generateLines, spread the angles one `margin`
        # apart so the branches do not overlap
        neighbor_angle = center_angle + i * margin + np.random.randint(0, margin)
        x1 = int(neighbor_r * np.cos(np.pi * neighbor_angle/180))
        y1 = int(neighbor_r * np.sin(np.pi * neighbor_angle/180))
        lines_array.append(MyLine([x0, x1], [y0, y1], lw=20, c='k'))
    return lines_array
from warnings import warn
import numpy as np
__all__ = [
'all_bounds',
'error_bounds',
'wasserstein_bounds',
'divergence_bound'
]
def all_bounds(log_weights, samples=None, moment_bound_fn=None,
q_var=None, p_var=None, log_norm_bound=None):
"""Compute all error and distance bounds.
Compute error and distance bounds between distribution `p` and `q` using
samples from `q`. The distributions need not be normalized.
Parameters
----------
    log_weights : array-like of floats, shape=(n_samples,)
log weights `log p(x_i) - log q(x_i)`, where `x_i` is sampled from `q`
and `p` may be an unnormalized distribution
samples : array-like matrix, shape=(n_samples, n_dimensions)
samples `x_i` associated with log weights
moment_bound_fn : function
`moment_bound_fn(p)` should return a bound on `min_y E[(x_i - y)^p]`.
It must be provided if `samples` is `None` and it must support `p = 2`
and `p = 4`.
q_var : float or array-like matrix
(Bound on) the (co)variance of `q`.
p_var : float or array-like matrix
(Bound on) the (co)variance of `p`.
log_norm_bound : float
Bound on the overall log normalization constant (the log marginal
likelihood when `p` is the unnormalized log posterior)
Returns
-------
results : dict
contains the following bounds: `mean_error`, `var_error`, `std_error`,
`d2`, `W1`, `W2`."""
d2, log_norm_bound = divergence_bound(log_weights,
log_norm_bound=log_norm_bound,
return_log_norm_bound=True)
results = wasserstein_bounds(d2, samples, moment_bound_fn)
if q_var is None and samples is not None:
q_var = np.cov(samples.T)
results.update(error_bounds(q_var=q_var, p_var=p_var, **results))
results['d2'] = d2
results['log_norm_bound'] = log_norm_bound
return results
def _compute_norm_if_needed(var):
if np.asarray(var).ndim == 2:
return np.linalg.norm(var, ord=2)
return var
def error_bounds(W1=np.inf, W2=np.inf, q_var=np.inf, p_var=np.inf):
"""Compute error bounds.
Compute bounds on differences in the means, standard deviations, and
covariances of `p` and `q` using (bounds on) the 1- and 2-Wasserstein
distance.
Parameters
----------
W1 : float
(Bound on) the 1-Wasserstein distance between `p` and `q`.
W2 : float
(Bound on) the 2-Wasserstein distance between `p` and `q`.
q_var : float or array-like matrix
(Bound on) the (co)variance of `q`.
p_var : float or array-like matrix
(Bound on) the (co)variance of `p`.
Returns
-------
results : dict
contains the following bounds: `mean_error`, `var_error`, `std_error`."""
results = dict()
results['mean_error'] = mean_bound(min(W1, W2))
results['std_error'] = std_bound(W2)
results['cov_error'] = var_bound(W2, _compute_norm_if_needed(q_var),
_compute_norm_if_needed(p_var))
return results
def wasserstein_bounds(d2, samples=None, moment_bound_fn=None):
"""Compute all bounds.
Compute 1- and 2-Wasserstein distance bounds between distribution `p` and
`q` using a bound on the 2-divergence and moment bounds.
Parameters
----------
d2 : float
(Bound on) the 2-divergence between `p` and `q`.
samples : array-like matrix, shape=(n_samples, n_dimensions)
samples from `q`.
    moment_bound_fn : function
`moment_bound_fn(a)` should return a bound on `min_y E[(x_i - y)^a]`.
It must be provided if `samples` is `None`. Must support `a = 2`
and `a = 4`.
Returns
-------
results : dict
contains the following bounds: `W1`, `W2`."""
results = dict()
if moment_bound_fn is None:
if samples is None:
            raise ValueError('must provide samples if moment_bound_fn not given')
samples = np.asarray(samples)
if samples.ndim == 1:
samples = samples[:,np.newaxis]
sample_mean = np.mean(samples, axis=0, keepdims=True)
centered_samples = samples - sample_mean
moment_bound_fn = lambda p: np.mean(np.sum(centered_samples**p, axis=1))
for p in [1, 2]:
Cp = moment_bound_fn(2*p)
results['W{}'.format(p)] = 2 * Cp**(.5/p) * np.expm1(d2)**(.5/p)
return results
def divergence_bound(log_weights, alpha=2., log_norm_bound=None,
return_log_norm_bound=False):
"""Compute a bound on the alpha-divergence.
Compute error and distance bounds between distribution `p` and `q` using
samples from `q`.
Parameters
----------
    log_weights : array-like of floats, shape=(n_samples,)
log weights `log p(x_i) - log q(x_i)`, where `x_i` is sampled from `q`
and `p` may be an unnormalized distribution.
alpha : float
order of the Renyi divergence. Must be greater than 1
log_norm_bound : float
Bound on the log normalization constant for `p` (the log marginal
likelihood when `p` is the unnormalized log posterior).
Returns
-------
dalpha : float
Bound on the alpha-divergence."""
if alpha <= 1:
raise ValueError('alpha must be greater than 1')
log_weights = np.asarray(log_weights)
log_rescale = np.max(log_weights)
rescaled_weights = np.exp(log_weights - log_rescale)**alpha
mean_rescaled_weight = mean_and_check_mc_error(rescaled_weights,
quantity_name='CUBO')
cubo = np.log(mean_rescaled_weight)/alpha + log_rescale
if log_norm_bound is None:
log_norm_bound = mean_and_check_mc_error(log_weights,
quantity_name='ELBO')
dalpha = alpha / (alpha - 1) * (cubo - log_norm_bound)
if return_log_norm_bound:
return dalpha, log_norm_bound
return dalpha
def mean_and_check_mc_error(a, atol=0.01, rtol=0.0, quantity_name=None):
m = np.mean(a)
s = np.std(a)/np.sqrt(a.size)
    if s > rtol*np.abs(m) + atol:
        what = quantity_name if quantity_name is not None else 'estimate'
        warn('Monte Carlo error of {} is large: mean = {:.3g}, std. err. = {:.3g}'.format(what, m, s))
    return m
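# A minimal usage sketch (hypothetical numbers): bound the error of a unit
# Gaussian approximation q = N(0, 1) to a target p = N(0.5, 1) using only
# samples from q and the analytic log density ratio.
def _example_all_bounds(n_samples=10000, seed=0):
    rng = np.random.RandomState(seed)
    samples = rng.randn(n_samples, 1)
    # log p(x) - log q(x) for the two unit-variance Gaussians
    log_weights = 0.5 * samples[:, 0] ** 2 - 0.5 * (samples[:, 0] - 0.5) ** 2
    return all_bounds(log_weights, samples=samples)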
import random, time, os, decoder
from PIL import Image
import numpy as np
import tensorflow as tf
import sys
SMALL_DATA_SET = 0 # 0-large data set(for gpu), 1-small data set(for cpu debug)
saved_models_dir = 'saved_models'
summaries_dir = "summaries"
idx_to_vocab = None
vocab_to_idx = None
def load_data():
global idx_to_vocab, vocab_to_idx
vocab = open('data/latex_vocab.txt').read().split('\n')
vocab_to_idx = dict([(vocab[i], i + 4) for i in range(len(vocab))])
idx_to_vocab = {value: key for key, value in vocab_to_idx.items()}
for i in range(4): idx_to_vocab.update({i: i})
formulas = open('data/formulas.norm.lst').read().split('\n')
# four meta keywords
# 0: START
# 1: END
# 2: UNKNOWN
# 3: PADDING
def formula_to_indices(formula):
formula = formula.split(' ')
res = [0]
for token in formula:
if token in vocab_to_idx:
res.append(vocab_to_idx[token])
else:
res.append(2)
res.append(1)
return res
    formulas = list(map(formula_to_indices, formulas))  # list() so indexing still works on Python 3
train = open('data/train_filter.lst').read().split('\n')[:-1]
val = open('data/validate_filter.lst').read().split('\n')[:-1]
test = open('data/test_filter.lst').read().split('\n')[:-1]
if SMALL_DATA_SET:
print('use small data set')
print('len train, val, test', len(train), len(val), len(test))
data_set_scale = 1.0 / 400
train = train[:int(len(train) * data_set_scale)]
val = val[:int(len(val) * data_set_scale)]
test = test[:int(len(test) * data_set_scale)]
print('process len train, val, test', len(train), len(val), len(test))
else:
print('use large data set')
print('sample png file name: {}'.format(test[0].split(' ')[0]))
def import_images(datum):
datum = datum.split(' ')
img = np.array(Image.open('data/images_processed/' + datum[0]).convert('L'))
return (img, formulas[int(datum[1])])
    train = list(map(import_images, train))
    val = list(map(import_images, val))
    test = list(map(import_images, test))
return train, val, test
def batchify(data, batch_size):
# group by image size
res = {}
for datum in data:
if datum[0].shape not in res:
res[datum[0].shape] = [datum]
else:
res[datum[0].shape].append(datum)
batches = []
for size in res:
# batch by similar sequence length within each image-size group -- this keeps padding to a
# minimum
group = sorted(res[size], key=lambda x: len(x[1]))
for i in range(0, len(group), batch_size):
            images = list(map(lambda x: np.expand_dims(np.expand_dims(x[0], 0), 3), group[i:i + batch_size]))
batch_images = np.concatenate(images, 0)
seq_len = max([len(x[1]) for x in group[i:i + batch_size]])
def preprocess(x):
arr = np.array(x[1])
pad = np.pad(arr, (0, seq_len - arr.shape[0]), 'constant', constant_values=3)
return np.expand_dims(pad, 0)
            labels = list(map(preprocess, group[i:i + batch_size]))
batch_labels = np.concatenate(labels, 0)
too_big = [(160, 400), (100, 500), (100, 360), (60, 360), (50, 400), \
(100, 800), (200, 500), (800, 800), (100, 600)] # these are only for the test set
if batch_labels.shape[0] == batch_size \
and not (batch_images.shape[1], batch_images.shape[2]) in too_big:
batches.append((batch_images, batch_labels))
# skip the last incomplete batch for now
return batches
def init_cnn(inp):
def weight_variable(name, shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.get_variable(name + "_weights", initializer=initial)
def bias_variable(name, shape):
initial = tf.constant(0.1, shape=shape)
return tf.get_variable(name + "_bias", initializer=initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
W_conv1 = weight_variable("conv1", [3, 3, 1, 512])
b_conv1 = bias_variable("conv1", [512])
h_conv1 = tf.nn.relu(conv2d(inp, W_conv1) + b_conv1)
h_bn1 = tf.contrib.layers.batch_norm(h_conv1)
W_conv2 = weight_variable("conv2", [3, 3, 512, 512])
b_conv2 = bias_variable("conv2", [512])
h_pad2 = tf.pad(h_bn1, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
h_conv2 = tf.nn.relu(conv2d(h_pad2, W_conv2) + b_conv2)
h_bn2 = tf.contrib.layers.batch_norm(h_conv2)
h_pool2 = tf.nn.max_pool(h_bn2, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')
W_conv3 = weight_variable("conv3", [3, 3, 512, 256])
b_conv3 = bias_variable("conv3", [256])
h_pad3 = tf.pad(h_pool2, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
h_conv3 = tf.nn.relu(conv2d(h_pad3, W_conv3) + b_conv3)
h_pool3 = tf.nn.max_pool(h_conv3, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
W_conv4 = weight_variable("conv4", [3, 3, 256, 256])
b_conv4 = bias_variable("conv4", [256])
h_pad4 = tf.pad(h_pool3, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
h_conv4 = tf.nn.relu(conv2d(h_pad4, W_conv4) + b_conv4)
h_bn4 = tf.contrib.layers.batch_norm(h_conv4)
W_conv5 = weight_variable("conv5", [3, 3, 256, 128])
b_conv5 = bias_variable("conv5", [128])
h_pad5 = tf.pad(h_bn4, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
h_conv5 = tf.nn.relu(conv2d(h_pad5, W_conv5) + b_conv5)
h_pool5 = tf.nn.max_pool(h_conv5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
W_conv6 = weight_variable("conv6", [3, 3, 128, 64])
b_conv6 = bias_variable("conv6", [64])
h_pad6 = tf.pad(h_pool5, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
h_conv6 = tf.nn.relu(conv2d(h_pad6, W_conv6) + b_conv6)
h_pad6 = tf.pad(h_conv6, [[0, 0], [2, 2], [2, 2], [0, 0]], "CONSTANT")
h_pool6 = tf.nn.max_pool(h_pad6, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
return h_pool6
def build_model(inp, batch_size, num_rows, num_columns, dec_seq_len):
# constants
enc_lstm_dim = 256
feat_size = 64
dec_lstm_dim = 512
vocab_size = 503
embedding_size = 80
cnn = init_cnn(inp)
# function for map to apply the rnn to each row
def fn(inp):
enc_init_shape = [batch_size, enc_lstm_dim]
with tf.variable_scope('encoder_rnn'):
with tf.variable_scope('forward'):
lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(enc_lstm_dim)
init_fw = tf.nn.rnn_cell.LSTMStateTuple( \
tf.get_variable("enc_fw_c", enc_init_shape), \
tf.get_variable("enc_fw_h", enc_init_shape)
)
with tf.variable_scope('backward'):
lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(enc_lstm_dim)
init_bw = tf.nn.rnn_cell.LSTMStateTuple( \
tf.get_variable("enc_bw_c", enc_init_shape), \
tf.get_variable("enc_bw_h", enc_init_shape)
)
output, _ = tf.nn.bidirectional_dynamic_rnn(lstm_cell_fw, \
lstm_cell_bw, \
inp, \
sequence_length=tf.fill([batch_size], \
tf.shape(inp)[1]), \
initial_state_fw=init_fw, \
initial_state_bw=init_bw \
)
return tf.concat(2, output)
fun = tf.make_template('fun', fn)
# shape is (batch size, rows, columns, features)
# swap axes so rows are first. map splits tensor on first axis, so fn will be applied to tensors
# of shape (batch_size,time_steps,feat_size)
rows_first = tf.transpose(cnn, [1, 0, 2, 3])
res = tf.map_fn(fun, rows_first, dtype=tf.float32)
encoder_output = tf.transpose(res, [1, 0, 2, 3])
dec_lstm_cell = tf.nn.rnn_cell.LSTMCell(dec_lstm_dim)
dec_init_shape = [batch_size, dec_lstm_dim]
dec_init_state = tf.nn.rnn_cell.LSTMStateTuple(tf.truncated_normal(dec_init_shape), \
tf.truncated_normal(dec_init_shape))
init_words = np.zeros([batch_size, 1, vocab_size])
decoder_output = decoder.embedding_attention_decoder(dec_init_state, \
tf.reshape(encoder_output, \
[batch_size, -1, \
2 * enc_lstm_dim]), \
dec_lstm_cell, \
vocab_size, \
dec_seq_len, \
batch_size, \
embedding_size, \
feed_previous=True)
return (encoder_output, decoder_output)
batch_size = 20
epochs = 100
lr = 0.1
min_lr = 0.001
start_time = time.time()
print("Loading Data")
train, val, test = load_data()
train = batchify(train, batch_size)
# train = sorted(train,key= lambda x: x[1].shape[1])
random.shuffle(train)
val = batchify(val, batch_size)
test_batch = batchify(test, batch_size)
print("Building Model")
learning_rate = tf.placeholder(tf.float32)
inp = tf.placeholder(tf.float32)
num_rows = tf.placeholder(tf.int32)
num_columns = tf.placeholder(tf.int32)
num_words = tf.placeholder(tf.int32)
true_labels = tf.placeholder(tf.int32)
train_accuracy = tf.placeholder(tf.float32)
val_accuracy = tf.placeholder(tf.float32)
_, (output, state) = build_model(inp, batch_size, num_rows, num_columns, num_words)
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(output, true_labels))
train_step = tf.train.AdadeltaOptimizer(learning_rate).minimize(cross_entropy)
output_idx = tf.to_int32(tf.argmax(output, 2))
correct_prediction = tf.equal(output_idx, true_labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('train_accuracy', train_accuracy)
tf.summary.scalar('val_accuracy', val_accuracy)
def run_train():
global lr
last_val_acc = 0
reduce_lr = 0
global_step = 0
train_accuracy_value = .0
val_accuracy_value = .0
with tf.Session() as sess:
try:
if tf.gfile.Exists(summaries_dir):
tf.gfile.DeleteRecursively(summaries_dir)
tf.gfile.MakeDirs(summaries_dir)
if not tf.gfile.Exists(saved_models_dir):
tf.gfile.MakeDirs(saved_models_dir)
sess.run(tf.global_variables_initializer())
merged_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(summaries_dir, sess.graph)
saver = tf.train.Saver()
print("Training")
for i in range(epochs):
if reduce_lr == 5:
lr = max(min_lr, lr - 0.005)
reduce_lr = 0
print("\nEpoch %d learning rate %.4f" % (i, lr))
epoch_start_time = time.time()
batch_50_start = epoch_start_time
for j in range(len(train)):
global_step += 1
images, labels = train[j]
if j < 5 or j % 50 == 0:
train_accuracy_value = accuracy.eval(feed_dict={inp: images, \
true_labels: labels, \
num_rows: images.shape[1], \
num_columns: images.shape[2], \
num_words: labels.shape[1]})
new_time = time.time()
print("step %d/%d, training accuracy %g, val accuracy %g, took %f mins" % \
(j, len(train), train_accuracy_value, val_accuracy_value, (new_time - batch_50_start) / 60))
batch_50_start = new_time
# about 3.5 minutes per 50 global_step when run in aws p2.xlarge
if j > 5:
print('saver.save, global_step =', global_step)
saver.save(sess, os.path.join(saved_models_dir, 'im2latex.ckpt'), global_step=global_step)
summary, loss, _= sess.run([merged_op, cross_entropy, train_step], \
feed_dict={learning_rate: lr,
inp: images, \
true_labels: labels, \
num_rows: images.shape[1], \
num_columns: images.shape[2], \
num_words: labels.shape[1],
train_accuracy: train_accuracy_value,
val_accuracy: val_accuracy_value})
print('loss', loss)
writer.add_summary(summary, global_step)
print("Time for epoch:%f mins" % ((time.time() - epoch_start_time) / 60))
print("Running on Validation Set")
accs = []
for j in range(len(val)):
images, labels = val[j]
acc = accuracy.eval(feed_dict={inp: images, \
true_labels: labels, \
num_rows: images.shape[1], \
num_columns: images.shape[2], \
num_words: labels.shape[1]})
accs.append(acc)
val_acc = sess.run(tf.reduce_mean(accs))
val_accuracy_value = val_acc
if (val_acc - last_val_acc) >= .01:
reduce_lr = 0
else:
                    reduce_lr += 1  # count epochs without enough validation improvement
last_val_acc = val_acc
print("val accuracy %g" % val_acc)
finally:
print('Finally saving model')
saver.save(sess, os.path.join(saved_models_dir, 'im2latex.ckpt'), global_step=global_step)
print('Running on Test Set')
accs = []
for j in range(len(test_batch)):
images, labels = test_batch[j]
test_accuracy = accuracy.eval(feed_dict={inp: images, \
true_labels: labels, \
num_rows: images.shape[1], \
num_columns: images.shape[2], \
num_words: labels.shape[1]})
accs.append(test_accuracy)
test_acc = sess.run(tf.reduce_mean(accs))
print("test accuracy %g" % test_acc)
def run_sample():
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.latest_checkpoint(saved_models_dir)
print(ckpt)
saver.restore(sess, ckpt)
images, labels = test[0]
        images = np.array([images] * batch_size)
from Anton import searchfiles
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import linregress
from itertools import cycle
import seaborn as sns; sns.set()
def get_files():
a = (r'Z:\2_Projekt__Permeabilitätsbeeinflussung\AP 4 - 90%\02_Löslichkeitsuntersuchungen\HS Microscope\Experiments\Final_results\data\2 - RIMR+MS300')
p = searchfiles(a, '.npy')
p.sort()
files = dict()
for i in p:
path,name = os.path.split(i)
if name[-6:-4] in files:
files[name[-6:-4]].append(i)
else:
files[name[-6:-4]] = [i]
return files
def solureaction(g, files, number='all', ende=None, start=None,marker='o', cr='k'):
p = files[g]
if number == 'all':
number = [os.path.split(i)[1][:-19] for i in p]
    # otherwise keep the caller-supplied subset, e.g. ['10', '30', '3', '5']
slopes = np.array([])
inters = np.array([])
colors = cycle(i for i in ['b', 'g', 'r', 'c', 'm', 'y', 'k'])
j = 0
for i in range(len(p)):
check = os.path.split(p[i])[1][:-19]
if check in number:
            data = np.load(p[i])
import warnings
import numpy as np
from scipy.ndimage import map_coordinates
from dipy.reconst.recspeed import le_to_odf, sum_on_blocks_1d
from dipy.reconst.dsi import project_hemisph_bvecs
from scipy.ndimage.filters import laplace,gaussian_laplace
from scipy.ndimage import zoom,generic_laplace,correlate1d
from dipy.core.geometry import sphere2cart,cart2sphere,vec2vec_rotmat
from dipy.tracking.propspeed import map_coordinates_trilinear_iso
from dipy.reconst.odf import OdfModel
###############################################
# MODULE TEMPORARILY DISABLED FOR REFACTORING #
###############################################
import nose
class UnderConstruction(nose.SkipTest):
pass
raise UnderConstruction()
###############################################
class DiffusionNablaModel(OdfModel):
def __init__(self, bvals, gradients, odf_sphere='symmetric362',
half_sphere_grads=False, fast=True):
''' Reconstruct the signal using Diffusion Nabla Imaging
As described in E.Garyfallidis, "Towards an accurate brain
tractograph"tractograph, PhD thesis, 2011.
Parameters
-----------
bvals : array, shape (N,)
gradients : array, shape (N,3) also known as bvecs
odf_sphere : str or tuple, optional
If str, then load sphere of given name using ``get_sphere``.
If tuple, gives (vertices, faces) for sphere.
filter : array, shape(len(vertices),)
default is None (using standard hanning filter for DSI)
half_sphere_grads : boolean Default(False)
in order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they can be mirrored to the full sphere.
See also
----------
dipy.reconst.eit.EquatorialInversionModel, dipy.reconst.dti.TensorModel, dipy.reconst.dsi.DiffusionSpectrumModel
'''
#check if bvectors are provided only on a hemisphere
if half_sphere_grads==True:
pass
#bvals=np.append(bvals.copy(),bvals[1:].copy())
#gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
#data=np.append(data.copy(),data[...,1:].copy(),axis=-1)
#load bvals and bvecs
self.bvals=bvals
gradients[np.isnan(gradients)] = 0.
self.gradients=gradients
#save number of total diffusion volumes
self.dn=self.gradients.shape[0] #data.shape[-1]
odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
self.set_odf_vertices(odf_vertices,None,odf_faces)
self.odfn=odf_vertices.shape[0]
#odf sampling radius
self.radius=np.arange(0,5,.2)
#self.radiusn=len(self.radius)
#self.create_qspace(bvals,gradients,16,8)
#peak threshold
#self.peak_thr=.7
#equatorial zone
self.zone=5.
self.gaussian_weight=0.05
self.fast=fast
if fast==True:
self.evaluate_odf=self.fast_odf
else:
self.evaluate_odf=self.slow_odf
self.precompute()
def precompute(self):
self.radiusn=len(self.radius)
self.create_qspace(self.bvals,self.gradients,17,8)
if self.fast==False:
self.radon_params()
self.precompute_interp_coords()
if self.fast==True:
self.precompute_fast_coords()
self.precompute_equator_indices(self.zone)
self.precompute_angular(self.gaussian_weight)
def precompute_botox(self,smooth,level):
self.botox_smooth=.05
self.botox_level=.3
def precompute_angular(self,smooth):
if smooth==None:
self.E=None
return
self.W=np.dot(self.odf_vertices,self.odf_vertices.T)
self.W=self.W.astype('f8')
E=np.exp(self.W/smooth)
self.E=E/np.sum(E,axis=1)[:,None]
def create_qspace(self,bvals,gradients,size,origin):
bv=bvals
bmin=np.sort(bv)[1]
bv=np.sqrt(bv/bmin)
qtable=np.vstack((bv,bv,bv)).T*gradients
qtable=np.floor(qtable+.5)
self.qtable=qtable
self.q=qtable+origin
self.q=self.q.astype('i8')
self.origin=origin
self.sz=size
def radon_params(self,ang_res=64):
#calculate radon integration parameters
phis=np.linspace(0,2*np.pi,ang_res)[:-1]
planars=[]
for phi in phis:
planars.append(sphere2cart(1,np.pi/2,phi))
planars=np.array(planars)
planarsR=[]
for v in self.odf_vertices:
R=vec2vec_rotmat(np.array([0,0,1]),v)
            planarsR.append(np.dot(R, planars.T))
#!/usr/bin/python3
'''
Created on 6 Jan 2017
@author: <NAME>
'''
import numpy as np
from os import listdir, path
from os.path import isdir, isfile, join, exists, dirname
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import linear_model
from sklearn.externals import joblib
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
from featureExtractor import FeatureExtractor
from imageLoader import batchGenerator
# setup all the paths adn variables
file_dir = dirname(__file__)
dataPath = join(file_dir, "../Dataset/")
stelePath = join(dataPath, "Manual/Preprocessed")
intermediatePath = join(file_dir, "../intermediates")
featurePath = join(intermediatePath, "features.npy")
labelsPath = join(intermediatePath, "labels.npy")
svmPath = join(intermediatePath, "svm.pkl")
image_paths = []
labels = []
batch_size = 200
if not exists(dataPath):
print("downloading dataset (57.5MB)")
url = urlopen("http://iamai.nl/downloads/GlyphDataset.zip")
with ZipFile(BytesIO(url.read())) as z:
z.extractall(join(dataPath, ".."))
# check if the feature file is present, if so; there is no need to recompute the features
# The pre-computed features can also be downloaded from http://iamai.nl/downloads/features.npy
if not isfile(featurePath):
print("indexing images...")
Steles = [ join(stelePath,f) for f in listdir(stelePath) if isdir(join(stelePath,f)) ]
for stele in Steles:
imagePaths = [ join(stele,f) for f in listdir(stele) if isfile(join(stele,f)) ]
for path in imagePaths:
image_paths.append(path)
labels.append(path[(path.rfind("_") + 1): path.rfind(".")])
featureExtractor = FeatureExtractor()
features = []
print("computing features...")
for idx, (batch_images, _) in enumerate(batchGenerator(image_paths, labels, batch_size)):
print("{}/{}".format((idx+1) * batch_size, len(labels)))
features_ = featureExtractor.get_features(batch_images)
features.append(features_)
features = np.vstack(features)
labels = np.asarray(labels)
print("saving features...")
np.save(featurePath, features)
    np.save(labelsPath, labels)
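# The so-far unused imports above (train_test_split, GridSearchCV,
# linear_model, joblib) point at the next step; the original training code is
# not shown, so this is a minimal sketch under those assumptions: load the
# cached features, grid-search a linear classifier and persist the best model.
if not isfile(svmPath):
    features = np.load(featurePath)
    labels = np.load(labelsPath)
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42)
    clf = GridSearchCV(linear_model.LogisticRegression(),
                       {'C': [0.1, 1.0, 10.0]}, n_jobs=-1)
    clf.fit(x_train, y_train)
    print("test accuracy: {:.3f}".format(clf.score(x_test, y_test)))
    joblib.dump(clf.best_estimator_, svmPath)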
import tensorflow as tf
import numpy as np
import tqdm
__all__ = ('pad_ragged_2d', 'shuffle_ragged_2d',
'inputs_to_labels', 'get_pos_encoding',
'get_quant_time', 'softmax_with_temp',
'generate_midis')
def pad_ragged_2d(ragged_tensor, pad_idx):
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensor.row_lengths(axis=-1)
maxlen = tf.math.reduce_max(lens)
mask = tf.sequence_mask(lens, maxlen, tf.bool)
zero_padded = ragged_tensor.to_tensor()
# zero_padded -> (batch_size, maxlen)
padding = tf.constant(pad_idx, dtype=zero_padded.dtype)
padded_tensor = tf.where(mask, zero_padded, padding)
# padded_tensor -> (batch_size, maxlen)
return padded_tensor
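# A minimal usage sketch: pad a ragged batch of token ids with pad_idx = 9,
# so the effect is distinguishable from the implicit zero padding of to_tensor().
def _example_pad_ragged_2d():
    ragged = tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int32)
    return pad_ragged_2d(ragged, pad_idx=9)  # -> [[1, 2, 3], [4, 5, 9]]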
def shuffle_ragged_2d(ragged_tensors, pad_idx, lowest_idx=5):
if not isinstance(ragged_tensors, (list, tuple)):
ragged_tensors = [ragged_tensors]
# ragged_tensor -> RAGGED(batch_size, None)
lens = ragged_tensors[0].row_lengths(axis=-1)
kth_lowest = -tf.nn.top_k(-lens, lowest_idx).values[-1]
shuffled_tensors = [[] for _ in ragged_tensors]
for len_, *rows in zip(lens, *ragged_tensors):
assert all(row.shape[0] == len_ for row in rows)
if len_ <= kth_lowest:
new_rows = [tf.pad(row, paddings=[[0, kth_lowest - len_]],
constant_values=pad_idx) for row in rows]
else:
start_idx = tf.random.uniform(
(), minval=0, maxval=len_ - kth_lowest + 1, dtype=tf.int64)
new_rows = [row[start_idx: start_idx + kth_lowest]
for row in rows]
for tensor, row in zip(shuffled_tensors, new_rows):
tensor.append(row[tf.newaxis, :])
shuffled_tensors = [tf.concat(shuffled_tensor, axis=0)
for shuffled_tensor in shuffled_tensors]
return shuffled_tensors
def inputs_to_labels(inputs, pad_idx):
# inputs -> (batch_size, seq_len)
inputs_padded = tf.pad(inputs[:, 1:], paddings=[
[0, 0], [0, 1]], constant_values=pad_idx)
return inputs_padded
def get_pos_encoding(seq_len, d_model):
numerator = np.arange(seq_len, dtype=np.float32)
numerator = numerator[:, np.newaxis]
denominator = np.arange(0, d_model, 2, dtype=np.float32)
denominator = denominator / d_model
denominator = np.power(np.array(10000, dtype=np.float32), denominator)
denominator = 1 / denominator
denominator = np.repeat(denominator, 2)
denominator = denominator[np.newaxis, :]
encoding = np.matmul(numerator, denominator)
encoding[:, ::2] = np.sin(encoding[:, ::2])
encoding[:, 1::2] = np.cos(encoding[:, 1::2])
#encoding = encoding[np.newaxis, ...]
encoding = tf.cast(encoding, dtype=tf.float32)
return encoding
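# A quick sanity check (illustrative only): position 0 must be zero at the
# sine slots and one at the cosine slots, since sin(0) = 0 and cos(0) = 1.
def _check_pos_encoding(seq_len=8, d_model=16):
    enc = get_pos_encoding(seq_len, d_model).numpy()
    assert enc.shape == (seq_len, d_model)
    assert np.allclose(enc[0, ::2], 0.0) and np.allclose(enc[0, 1::2], 1.0)
    return enc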
def get_quant_time():
step = 0.001
coef = 1.16
delta = 0
total_reps = 64
local_reps = 2
quant_time = []
for _ in range(total_reps // local_reps):
for _ in range(local_reps):
delta += step
quant_time.append(delta)
step *= coef
quant_time = np.sort(quant_time + [5.0, 0.0])
return quant_time
def softmax_with_temp(x, temp=1.0):
assert isinstance(temp, float)
assert temp > 0
assert all(map(lambda a: a > 0, x))
x = x / np.sum(x) / temp
x = tf.nn.softmax(x).numpy()
return x
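# Example (illustrative): a lower temperature sharpens the distribution over
# the same positive scores, while a higher one flattens it.
def _example_softmax_with_temp():
    x = np.array([1.0, 2.0, 3.0])
    return softmax_with_temp(x, temp=0.5), softmax_with_temp(x, temp=2.0)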
def generate_midis(model, seq_len, mem_len, max_len, parser, filenames, pad_idx, top_k=1, temp=1.0):
assert isinstance(seq_len, int)
assert seq_len > 0
assert isinstance(mem_len, int)
assert mem_len >= 0
assert isinstance(max_len, int)
assert max_len > 1
batch_size = len(filenames)
sounds, deltas = zip(*[parser.load_features(filename)
for filename in filenames])
min_len = min([len(s) for s in sounds])
orig_len = np.random.randint(1, min(2 * mem_len, min_len))
assert orig_len >= 1
sounds = np.array([sound[:orig_len] for sound in sounds])
deltas = np.array([delta[:orig_len] for delta in deltas])
# sounds -> (batch_size, orig_len)
full_len = mem_len + seq_len - 1
inputs_sound = tf.constant(sounds[:, -seq_len:])
inputs_delta = tf.constant(deltas[:, -seq_len:])
outputs_sound, outputs_delta, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=(inputs_sound, inputs_delta),
mem_list=None,
next_mem_len=mem_len,
training=False
)
for _ in tqdm.tqdm(range(max_len)):
outputs_sound = outputs_sound[:, -1, :]
probs_sound = tf.nn.softmax(outputs_sound, axis=-1).numpy()
probs_sound[:, pad_idx] = 0
# probs_sound -> (batch_size, n_sounds)
outputs_delta = outputs_delta[:, -1, :]
probs_delta = tf.nn.softmax(outputs_delta, axis=-1).numpy()
probs_delta[:, pad_idx] = 0
# probs_delta -> (batch_size, n_deltas)
new_sounds = []
for batch_probs in probs_sound:
best_idxs = batch_probs.argsort()[-top_k:][::-1]
best_probs = softmax_with_temp(batch_probs[best_idxs], temp)
new_sound = np.random.choice(best_idxs, p=best_probs)
new_sounds.append(new_sound)
new_sounds = np.array(new_sounds)[:, np.newaxis]
# new_sounds -> (batch_size, 1)
sounds = np.concatenate((sounds, new_sounds), axis=-1)
new_deltas = []
for batch_probs in probs_delta:
best_idxs = batch_probs.argsort()[-top_k:][::-1]
best_probs = softmax_with_temp(batch_probs[best_idxs], temp)
new_delta = np.random.choice(best_idxs, p=best_probs)
new_deltas.append(new_delta)
new_deltas = np.array(new_deltas)[:, np.newaxis]
# new_deltas -> (batch_size, 1)
deltas = np.concatenate((deltas, new_deltas), axis=-1)
inputs_sound = tf.constant(new_sounds)
inputs_delta = tf.constant(new_deltas)
outputs_sound, outputs_delta, next_mem_list, attention_weight_list, attention_loss_list = model(
inputs=(inputs_sound, inputs_delta),
mem_list=next_mem_list,
next_mem_len=mem_len,
training=False
)
sounds = sounds[:, orig_len:]
deltas = deltas[:, orig_len:]
midi_list = [parser.features_to_midi(
sound, delta) for sound, delta in zip(sounds, deltas)]
return midi_list, next_mem_list, attention_weight_list, attention_loss_list
def generate_text(model, seq_len, mem_len, max_len, tokenizer, start_idx, end_idx, blocked_idxs,
batch_size, beginning=None, top_k=3, temp=0.4):
if isinstance(beginning, str):
words = tokenizer.texts_to_sequences([beginning])
        words = np.repeat(words, batch_size, axis=0)
"""Non-negative matrix and tensor factorization basic functions
"""
# Author: <NAME>
# License: MIT
# Jan 4, '20
# Initialize progressbar
import pandas as pd
import math
import numpy as np
from scipy.sparse.linalg import svds
from tqdm import tqdm
from scipy.stats import hypergeom
from scipy.optimize import nnls
from .nmtf_core import *
from .nmtf_utils import *
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
EPSILON = np.finfo(np.float32).eps
def NMFInit(M, Mmis, Mt0, Mw0, nc, tolerance, LogIter, myStatusBox):
"""Initialize NMF components using NNSVD
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix (may be empty)
Mw0: Initial right hand matrix (may be empty)
nc: NMF rank
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Reference
---------
<NAME>, <NAME> (2008) SVD based initialization: A head start for nonnegative matrix factorization
    Pattern Recognition, Volume 41, Issue 4, April 2008, Pages 1350-1362
"""
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
if (Mt.shape[0] == 0) or (Mw.shape[0] == 0):
if n_Mmis == 0:
if nc >= min(n,p):
                # arpack cannot factorize at full rank -> duplicate M in both dimensions to force it to work
t, d, w = svds(np.concatenate((np.concatenate((M, M), axis=1),np.concatenate((M, M), axis=1)), axis=0), k=nc)
t *= np.sqrt(2)
w *= np.sqrt(2)
d /= 2
# svd causes mem allocation problem with large matrices
# t, d, w = np.linalg.svd(M)
# Mt = t
# Mw = w.T
else:
t, d, w = svds(M, k=nc)
Mt = t[:n,:]
Mw = w[:,:p].T
#svds returns singular vectors in reverse order
Mt = Mt[:,::-1]
Mw = Mw[:,::-1]
d = d[::-1]
else:
Mt, d, Mw, Mmis, Mmsr, Mmsr2, AddMessage, ErrMessage, cancel_pressed = rSVDSolve(
M, Mmis, nc, tolerance, LogIter, 0, "", 200,
1, 1, 1, myStatusBox)
for k in range(0, nc):
U1 = Mt[:, k]
U2 = -Mt[:, k]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = Mw[:, k]
V2 = -Mw[:, k]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, p))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, (1, p))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, k] = np.reshape(U1, n)
Mw[:, k] = np.reshape(V1, p)
else:
Mt[:, k] = np.reshape(U2, n)
Mw[:, k] = np.reshape(V2, p)
return [Mt, Mw]
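# A minimal usage sketch (assumption: with a complete matrix and nc < min(n, p)
# the myStatusBox argument is never touched, so None can be passed):
def _example_nmf_init(n=20, p=10, nc=3, seed=0):
    rng = np.random.RandomState(seed)
    M = rng.rand(n, nc) @ rng.rand(nc, p)  # exact rank-nc, non-negative matrix
    Mt, Mw = NMFInit(M, np.array([]), np.array([]), np.array([]),
                     nc, tolerance=1.e-6, LogIter=0, myStatusBox=None)
    return Mt, Mw  # NNSVD starting factors, M ~ Mt @ Mw.T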
def rNMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, MaxIterations, NMFAlgo, NMFFixUserLHE,
NMFFixUserRHE, NMFMaxInterm,
NMFSparseLevel, NMFRobustResampleColumns, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, myStatusBox):
"""Estimate left and right hand matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
nc: NMF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFAlgo: =1,3: Divergence; =2,4: Least squares;
NMFFixUserLHE: = 1 => fixed left hand matrix columns
NMFFixUserRHE: = 1 => fixed right hand matrix columns
NMFMaxInterm: Max iterations for warmup multiplication rules
NMFSparseLevel: Requested sparsity in terms of relative number of rows with 0 values in right hand matrix
NMFRobustResampleColumns: Resample columns during bootstrap
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NMFFindParts: Enforce convexity on left hand matrix
NMFFindCentroids: Enforce convexity on right hand matrix
NMFKernel: Type of kernel used; 1: linear; 2: quadraitc; 3: radial
NMFReweighColumns: Reweigh columns in 2nd step of parts-based NMF
NMFPriors: Priors on right hand matrix
Output:
Mt: Left hand matrix
Mw: Right hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff: Objective minimum achieved
Mh: Convexity matrix
flagNonconvex: Updated non-convexity flag on left hand matrix
"""
# Check parameter consistency (and correct if needed)
AddMessage = []
ErrMessage =''
cancel_pressed = 0
nc = int(nc)
if NMFFixUserLHE*NMFFixUserRHE == 1:
return Mt0, Mw0, np.array([]), np.array([]), 0, np.array([]), 0, AddMessage, ErrMessage, cancel_pressed
if (nc == 1) & (NMFAlgo > 2):
NMFAlgo -= 2
if NMFAlgo <= 2:
NMFRobustNRuns = 0
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
if NMFRobustResampleColumns > 0:
M = np.copy(M).T
if n_Mmis > 0:
Mmis = np.copy(Mmis).T
Mtemp = np.copy(Mw0)
Mw0 = np.copy(Mt0)
Mt0 = Mtemp
NMFFixUserLHEtemp = NMFFixUserLHE
NMFFixUserLHE = NMFFixUserRHE
NMFFixUserRHE = NMFFixUserLHEtemp
n, p = M.shape
try:
n_NMFPriors, nc = NMFPriors.shape
except:
n_NMFPriors = 0
NMFRobustNRuns = int(NMFRobustNRuns)
MtPct = np.nan
MwPct = np.nan
flagNonconvex = 0
# Step 1: NMF
Status = "Step 1 - NMF Ncomp=" + str(nc) + ": "
Mt, Mw, diffsup, Mhsup, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt0, Mw0, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo,
NMFFixUserLHE, NMFFixUserRHE, NMFMaxInterm, 100, NMFSparseLevel,
NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex, AddMessage, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
if (n_NMFPriors > 0) & (NMFReweighColumns > 0):
# Run again with fixed LHE & no priors
Status = "Step 1bis - NMF (fixed LHE) Ncomp=" + str(nc) + ": "
Mw = np.ones((p, nc)) / math.sqrt(p)
Mt, Mw, diffsup, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, tolerance, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0, NMFMaxInterm, 100,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, 0, NMFPriors, flagNonconvex, AddMessage,
myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
if n_Mmis > 0:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
else:
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M[Boot, :], Mmis, Mtsup[Boot, :], Mwsup, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, nc, 0,
NMFMaxInterm, 20, NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns,
NMFPriors, flagNonconvex, AddMessage, myStatusBox)
for k in range(0, nc):
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = Mw[:, k]
Mwn = np.zeros((p, nc))
for k in range(0, nc):
if (NMFAlgo == 2) | (NMFAlgo == 4):
ScaleMw = np.linalg.norm(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
else:
ScaleMw = np.sum(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
if ScaleMw > 0:
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = \
MwBlk[:, k * NMFRobustNRuns + iBootstrap] / ScaleMw
Mwn[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
ColClust = np.zeros(p, dtype=int)
if NMFCalculateLeverage > 0:
Mwn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mwn, NMFUseRobustLeverage, AddMessage,
myStatusBox)
for j in range(0, p):
ColClust[j] = np.argmax(np.array(Mwn[j, :]))
MwPct[j, ColClust[j]] = MwPct[j, ColClust[j]] + 1
MwPct = MwPct / NMFRobustNRuns
# Update Mtsup
MtPct = np.zeros((n, nc))
for iBootstrap in range(0, NMFRobustNRuns):
Status = "Step 3 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NMF Ncomp=" + str(nc) + ": "
Mw = np.zeros((p, nc))
for k in range(0, nc):
Mw[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
Mt, Mw, diff, Mh, NMFPriors, flagNonconvex, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mtsup, Mw, nc, 1.e-3, precision, LogIter, Status, MaxIterations, NMFAlgo, 0, nc, NMFMaxInterm, 20,
NMFSparseLevel, NMFFindParts, NMFFindCentroids, NMFKernel, NMFReweighColumns, NMFPriors, flagNonconvex,
AddMessage, myStatusBox)
RowClust = np.zeros(n, dtype=int)
if NMFCalculateLeverage > 0:
Mtn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mt, NMFUseRobustLeverage, AddMessage,
myStatusBox)
else:
Mtn = Mt
for i in range(0, n):
RowClust[i] = np.argmax(Mtn[i, :])
MtPct[i, RowClust[i]] = MtPct[i, RowClust[i]] + 1
MtPct = MtPct / NMFRobustNRuns
Mt = Mtsup
Mw = Mwsup
Mh = Mhsup
diff = diffsup
if NMFRobustResampleColumns > 0:
Mtemp = np.copy(Mt)
Mt = np.copy(Mw)
Mw = Mtemp
Mtemp = np.copy(MtPct)
MtPct = np.copy(MwPct)
MwPct = Mtemp
return Mt, Mw, MtPct, MwPct, diff, Mh, flagNonconvex, AddMessage, ErrMessage, cancel_pressed
def NTFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, NTFUnimodal,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, init_type, myStatusBox):
"""Initialize NTF components for HALS
Input:
M: Input tensor
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt_nmf: initialization of LHM in NMF(unstacked tensor), may be empty
Mw_nmf: initialization of RHM of NMF(unstacked tensor), may be empty
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
init_type : integer, default 0
init_type = 0 : NMF initialization applied on the reshaped matrix [1st dim x vectorized (2nd & 3rd dim)]
init_type = 1 : NMF initialization applied on the reshaped matrix [vectorized (1st & 2nd dim) x 3rd dim]
Output:
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
"""
AddMessage = []
n, p = M.shape
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
nc = int(nc)
NBlocks = int(NBlocks)
init_type = int(init_type)
Status0 = "Step 1 - Quick NMF Ncomp=" + str(nc) + ": "
if init_type == 1:
#Init legacy
Mstacked, Mmis_stacked = NTFStack(M, Mmis, NBlocks)
nc2 = min(nc, NBlocks) # factorization rank can't be > number of blocks
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, np.array([]), np.array([]), nc2, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
Mstacked, Mmis_stacked, Mt_nmf, Mw_nmf, nc2, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
# Factorize Left vectors and distribute multiple factors if nc2 < nc
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
NFact = int(np.ceil(nc / NBlocks))
for k in range(0, nc2):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mt_nmf[:, k], (int(p / NBlocks), n)).T, k=NFact)
V = V.T
#svds returns singular vectors in reverse order
U = U[:,::-1]
V = V[:,::-1]
d = d[::-1]
myStatusBox.update_status(delay=1, status="SVD completed")
for iFact in range(0, NFact):
ind = iFact * NBlocks + k
if ind < nc:
U1 = U[:, iFact]
U2 = -U[:, iFact]
U1[U1 < 0] = 0
U2[U2 < 0] = 0
V1 = V[:, iFact]
V2 = -V[:, iFact]
V1[V1 < 0] = 0
V2[V2 < 0] = 0
U1 = np.reshape(U1, (n, 1))
V1 = np.reshape(V1, (1, int(p / NBlocks)))
U2 = np.reshape(U2, (n, 1))
V2 = np.reshape(V2, ((1, int(p / NBlocks))))
if np.linalg.norm(U1 @ V1) > np.linalg.norm(U2 @ V2):
Mt[:, ind] = np.reshape(U1, n)
Mw[:, ind] = d[iFact] * np.reshape(V1, int(p / NBlocks))
else:
Mt[:, ind] = np.reshape(U2, n)
Mw[:, ind] = d[iFact] * np.reshape(V2, int(p / NBlocks))
Mb[:, ind] = Mw_nmf[:, k]
else:
#Init default
if (Mt_nmf.shape[0] == 0) or (Mw_nmf.shape[0] == 0):
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, np.array([]), np.array([]), nc, tolerance, LogIter, myStatusBox)
else:
Mt_nmf, Mw_nmf = NMFInit(M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, LogIter, myStatusBox)
# Quick NMF
Mt_nmf, Mw_nmf, diff, Mh, dummy1, dummy2, AddMessage, ErrMessage, cancel_pressed = NMFSolve(
M, Mmis, Mt_nmf, Mw_nmf, nc, tolerance, precision, LogIter, Status0,
10, 2, 0, 0, 1, 1, 0, 0, 0, 1, 0, np.array([]), 0, AddMessage, myStatusBox)
#Factorize Left vectors
Mt = np.zeros((n, nc))
Mw = np.zeros((int(p / NBlocks), nc))
Mb = np.zeros((NBlocks, nc))
for k in range(0, nc):
myStatusBox.update_status(delay=1, status="Start SVD...")
U, d, V = svds(np.reshape(Mw_nmf[:, k], (int(p / NBlocks), NBlocks)), k=1)
V = V.T
U = np.abs(U)
V = np.abs(V)
myStatusBox.update_status(delay=1, status="SVD completed")
Mt[:, k] = Mt_nmf[:, k]
Mw[:, k] = d[0] * np.reshape(U, int(p / NBlocks))
Mb[:, k] = np.reshape(V, NBlocks)
for k in range(0, nc):
if (NTFUnimodal > 0) & (NTFLeftComponents > 0):
# Enforce unimodal distribution
tmax = np.argmax(Mt[:, k])
for i in range(tmax + 1, n):
Mt[i, k] = min(Mt[i - 1, k], Mt[i, k])
for i in range(tmax - 1, -1, -1):
Mt[i, k] = min(Mt[i + 1, k], Mt[i, k])
if (NTFUnimodal > 0) & (NTFRightComponents > 0):
# Enforce unimodal distribution
wmax = np.argmax(Mw[:, k])
for j in range(wmax + 1, int(p / NBlocks)):
Mw[j, k] = min(Mw[j - 1, k], Mw[j, k])
for j in range(wmax - 1, -1, -1):
Mw[j, k] = min(Mw[j + 1, k], Mw[j, k])
if (NTFUnimodal > 0) & (NTFBlockComponents > 0):
# Enforce unimodal distribution
bmax = np.argmax(Mb[:, k])
for iBlock in range(bmax + 1, NBlocks):
Mb[iBlock, k] = min(Mb[iBlock - 1, k], Mb[iBlock, k])
for iBlock in range(bmax - 1, -1, -1):
Mb[iBlock, k] = min(Mb[iBlock + 1, k], Mb[iBlock, k])
return [Mt, Mw, Mb, AddMessage, ErrMessage, cancel_pressed]
def rNTFSolve(M, Mmis, Mt0, Mw0, Mb0, nc, tolerance, precision, LogIter, MaxIterations, NMFFixUserLHE, NMFFixUserRHE,
NMFFixUserBHE, NMFAlgo, NMFRobustNRuns, NMFCalculateLeverage, NMFUseRobustLeverage, NTFFastHALS, NTFNIterations,
NMFSparseLevel, NTFUnimodal, NTFSmooth, NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv,
NMFPriors, myStatusBox):
"""Estimate NTF matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
Mt0: Initial left hand matrix
Mw0: Initial right hand matrix
Mb0: Initial block hand matrix
nc: NTF rank
tolerance: Convergence threshold
precision: Replace 0-values in multiplication rules
LogIter: Log results through iterations
MaxIterations: Max iterations
NMFFixUserLHE: fix left hand matrix columns: = 1, else = 0
NMFFixUserRHE: fix right hand matrix columns: = 1, else = 0
NMFFixUserBHE: fix block hand matrix columns: = 1, else = 0
NMFAlgo: =5: Non-robust version, =6: Robust version
NMFRobustNRuns: Number of bootstrap runs
NMFCalculateLeverage: Calculate leverages
NMFUseRobustLeverage: Calculate leverages based on robust max across factoring columns
NTFFastHALS: Use Fast HALS (does not accept handle missing values and convolution)
NTFNIterations: Warmup iterations for fast HALS
NMFSparseLevel : sparsity level (as defined by Hoyer); +/- = make RHE/LHe sparse
NTFUnimodal: Apply Unimodal constraint on factoring vectors
NTFSmooth: Apply Smooth constraint on factoring vectors
NTFLeftComponents: Apply Unimodal/Smooth constraint on left hand matrix
NTFRightComponents: Apply Unimodal/Smooth constraint on right hand matrix
NTFBlockComponents: Apply Unimodal/Smooth constraint on block hand matrix
NBlocks: Number of NTF blocks
NTFNConv: Half-Size of the convolution window on 3rd-dimension of the tensor
NMFPriors: Elements in Mw that should be updated (others remain 0)
Output:
Mt_conv: Convolutional Left hand matrix
Mt: Left hand matrix
Mw: Right hand matrix
Mb: Block hand matrix
MtPct: Percent robust clustered rows
MwPct: Percent robust clustered columns
diff : Objective minimum achieved
"""
AddMessage = []
ErrMessage = ''
cancel_pressed = 0
n, p0 = M.shape
nc = int(nc)
NBlocks = int(NBlocks)
p = int(p0 / NBlocks)
NTFNConv = int(NTFNConv)
if NMFFixUserLHE*NMFFixUserRHE*NMFFixUserBHE == 1:
return np.zeros((n, nc*(2*NTFNConv+1))), Mt0, Mw0, Mb0, np.zeros((n, p0)), np.ones((n, nc)), np.ones((p, nc)), AddMessage, ErrMessage, cancel_pressed
Mmis = Mmis.astype(np.int)
n_Mmis = Mmis.shape[0]
if n_Mmis == 0:
ID = np.where(np.isnan(M) == True)
n_Mmis = ID[0].size
if n_Mmis > 0:
Mmis = (np.isnan(M) == False)
Mmis = Mmis.astype(np.int)
M[Mmis == 0] = 0
else:
M[Mmis == 0] = 0
NTFNIterations = int(NTFNIterations)
NMFRobustNRuns = int(NMFRobustNRuns)
Mt = np.copy(Mt0)
Mw = np.copy(Mw0)
Mb = np.copy(Mb0)
Mt_conv = np.array([])
# Check parameter consistency (and correct if needed)
if (nc == 1) | (NMFAlgo == 5):
NMFRobustNRuns = 0
if NMFRobustNRuns == 0:
MtPct = np.nan
MwPct = np.nan
if (n_Mmis > 0 or NTFNConv > 0 or NMFSparseLevel != 0) and NTFFastHALS > 0:
NTFFastHALS = 0
reverse2HALS = 1
else:
reverse2HALS = 0
# Step 1: NTF
Status0 = "Step 1 - NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if NTFNIterations > 0:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
NTFNIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, 0, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M, Mmis, Mt, Mw, Mb, nc, tolerance, precision, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mt, Mw, Mb, nc, tolerance, LogIter, Status0,
MaxIterations, NMFFixUserLHE, NMFFixUserRHE, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
Mtsup = np.copy(Mt)
Mwsup = np.copy(Mw)
Mbsup = np.copy(Mb)
diff_sup = diff
# Bootstrap to assess robust clustering
if NMFRobustNRuns > 1:
# Update Mwsup
MwPct = np.zeros((p, nc))
MwBlk = np.zeros((p, NMFRobustNRuns * nc))
for iBootstrap in range(0, NMFRobustNRuns):
Boot = np.random.randint(n, size=n)
Status0 = "Step 2 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NTF Ncomp=" + str(nc) + ": "
if NTFFastHALS > 0:
if n_Mmis > 0:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M[Boot, :], np.array([]), Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, precision, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
if n_Mmis > 0:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M[Boot, :], Mmis[Boot, :], Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M[Boot, :], np.array([]), Mtsup[Boot, :], Mwsup, Mb, nc, 1.e-3, LogIter, Status0,
MaxIterations, 1, 0, NMFFixUserBHE, NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
for k in range(0, nc):
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = Mw[:, k]
Mwn = np.zeros((p, nc))
for k in range(0, nc):
ScaleMw = np.linalg.norm(MwBlk[:, k * NMFRobustNRuns + iBootstrap])
if ScaleMw > 0:
MwBlk[:, k * NMFRobustNRuns + iBootstrap] = \
MwBlk[:, k * NMFRobustNRuns + iBootstrap] / ScaleMw
Mwn[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
ColClust = np.zeros(p, dtype=int)
if NMFCalculateLeverage > 0:
Mwn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mwn, NMFUseRobustLeverage, AddMessage,
myStatusBox)
for j in range(0, p):
ColClust[j] = np.argmax(np.array(Mwn[j, :]))
MwPct[j, ColClust[j]] = MwPct[j, ColClust[j]] + 1
MwPct = MwPct / NMFRobustNRuns
# Update Mtsup
MtPct = np.zeros((n, nc))
for iBootstrap in range(0, NMFRobustNRuns):
Status0 = "Step 3 - " + \
"Boot " + str(iBootstrap + 1) + "/" + str(NMFRobustNRuns) + " NTF Ncomp=" + str(nc) + ": "
Mw = np.zeros((p, nc))
for k in range(0, nc):
Mw[:, k] = MwBlk[:, k * NMFRobustNRuns + iBootstrap]
if NTFFastHALS > 0:
Mt, Mw, Mb, diff, cancel_pressed = NTFSolveFast(
M, Mmis, Mtsup, Mw, Mb, nc, 1.e-3, precision, LogIter, Status0, MaxIterations, 0, 1, NMFFixUserBHE,
NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, myStatusBox)
else:
Mt_conv, Mt, Mw, Mb, diff, cancel_pressed = NTFSolve(
M, Mmis, Mtsup, Mw, Mb, nc, 1.e-3, LogIter, Status0, MaxIterations, 0, 1, NMFFixUserBHE,
NMFSparseLevel, NTFUnimodal, NTFSmooth,
NTFLeftComponents, NTFRightComponents, NTFBlockComponents, NBlocks, NTFNConv, NMFPriors, myStatusBox)
RowClust = np.zeros(n, dtype=int)
if NMFCalculateLeverage > 0:
Mtn, AddMessage, ErrMessage, cancel_pressed = Leverage(Mt, NMFUseRobustLeverage, AddMessage,
myStatusBox)
else:
Mtn = Mt
for i in range(0, n):
RowClust[i] = np.argmax(Mtn[i, :])
MtPct[i, RowClust[i]] = MtPct[i, RowClust[i]] + 1
MtPct = MtPct / NMFRobustNRuns
Mt = Mtsup
Mw = Mwsup
Mb = Mbsup
diff = diff_sup
if reverse2HALS > 0:
AddMessage.insert(len(AddMessage), 'Currently, Fast HALS cannot be applied with missing data or convolution window and was reversed to Simple HALS.')
return Mt_conv, Mt, Mw, Mb, MtPct, MwPct, diff, AddMessage, ErrMessage, cancel_pressed
def rSVDSolve(M, Mmis, nc, tolerance, LogIter, LogTrials, Status0, MaxIterations,
SVDAlgo, SVDCoverage, SVDNTrials, myStatusBox):
"""Estimate SVD matrices (robust version)
Input:
M: Input matrix
Mmis: Define missing values (0 = missing cell, 1 = real cell)
nc: SVD rank
tolerance: Convergence threshold
LogIter: Log results through iterations
LogTrials: Log results through trials
Status0: Initial displayed status to be updated during iterations
MaxIterations: Max iterations
SVDAlgo: =1: Non-robust version, =2: Robust version
SVDCoverage: Coverage non-outliers (robust version)
SVDNTrials: Number of trials (robust version)
Output:
Mt: Left hand matrix
Mev: Scaling factors
Mw: Right hand matrix
Mmis: Matrix of missing/flagged outliers
Mmsr: Vector of Residual SSQ
         Mmsr2: Vector of Residual variance
Reference
---------
L. Liu et al (2003) Robust singular value decomposition analysis of microarray data
PNAS November 11, 2003 vol. 100 no. 23 13167–13172
"""
AddMessage = []
ErrMessage = ''
cancel_pressed = 0
# M0 is the running matrix (to be factorized, initialized from M)
M0 = np.copy(M)
n, p = M0.shape
Mmis = Mmis.astype(np.bool_)
n_Mmis = Mmis.shape[0]
if n_Mmis > 0:
M0[Mmis == False] = np.nan
else:
Mmis = (np.isnan(M0) == False)
Mmis = Mmis.astype(np.bool_)
n_Mmis = Mmis.shape[0]
trace0 = np.sum(M0[Mmis] ** 2)
nc = int(nc)
SVDNTrials = int(SVDNTrials)
nxp = n * p
nxpcov = int(round(nxp * SVDCoverage, 0))
Mmsr = np.zeros(nc)
Mmsr2 = np.zeros(nc)
Mev = np.zeros(nc)
if SVDAlgo == 2:
MaxTrial = SVDNTrials
else:
MaxTrial = 1
Mw = np.zeros((p, nc))
Mt = np.zeros((n, nc))
Mdiff = np.zeros((n, p))
w = np.zeros(p)
t = np.zeros(n)
wTrial = np.zeros(p)
tTrial = np.zeros(n)
    MmisTrial = np.zeros((n, p), dtype=np.bool_)  # np.bool was removed from numpy; use np.bool_
# Outer-reference M becomes local reference M, which is the running matrix within ALS/LTS loop.
M = np.zeros((n, p))
wnorm = np.zeros((p, n))
tnorm = np.zeros((n, p))
denomw = np.zeros(n)
    denomt = np.zeros(p)
import json
import logging
import os
from typing import List, Dict
import click
import numpy as np
import tensorflow as tf
from sklearn.metrics import cohen_kappa_score, precision_recall_fscore_support, accuracy_score
from tqdm import tqdm
from discopy.components.component import Component
from discopy.components.connective.base import get_connective_candidates
from discopy.evaluate.conll import evaluate_docs, print_results
from discopy.utils import init_logger
from discopy_data.data.doc import Document
from discopy_data.data.loaders.conll import load_bert_conll_dataset
from discopy_data.data.relation import Relation
logger = logging.getLogger('discopy')
def get_conn_model(in_size, out_size, hidden_size, hidden_size2=256):
x = y = tf.keras.layers.Input(shape=(in_size,), name='connective')
y = tf.keras.layers.Dense(hidden_size, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(hidden_size2, kernel_initializer='lecun_normal', activation='selu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(out_size, activation='softmax')(y)
model = tf.keras.models.Model(x, y)
optimizer = tf.keras.optimizers.RMSprop()
model.compile(optimizer, 'sparse_categorical_crossentropy', metrics=[
"accuracy",
])
return model
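# Illustrative usage sketch (not part of the original module): the sizes below
# are hypothetical, e.g. 768-dim BERT features and 4 connective classes.
def _example_conn_model():
    model = get_conn_model(in_size=768, out_size=4, hidden_size=1024)
    dummy_features = np.random.rand(8, 768).astype('float32')
    return model.predict(dummy_features)  # shape (8, 4): class probabilities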
def get_bert_features(idxs, doc_bert, used_context=0):
idxs = list(idxs)
    pad = np.zeros_like(doc_bert[0])
import numpy as np
import random
import time
from matplotlib import pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.special import gammainc
from itertools import islice
# Initialize random number generator:
np.random.seed(int(100*time.perf_counter()))
## ----------------------------------------------------------------------------
## --------------------------- Plotting functions -----------------------------
## ----------------------------------------------------------------------------
def KMG_analysis(events, times):
""" Kaplan-Meier survivial funciton with greenwood's forumla to estimate variance.
Args:
-------
events:
times:
"""
S = np.ones(len(times)+1) #survival probability
S[0] = 1
V = np.ones(len(times)+1) #variance of S (Greenwood's formula)
V_cumulative = np.zeros(len(times)+1)
V[0] = 0
num_of_events = np.sum(events)
    # Loop over event times; S[0] = 1 and V[0] = 0 stay fixed as initial values
    for i in range(1, len(times) + 1):
        S[i] = S[i-1] * (1 - events[i-1]/num_of_events)
        V_cumulative[i] = V_cumulative[i-1] + events[i-1]/(num_of_events*(num_of_events-events[i-1]))
        V[i] = S[i]**2 * V_cumulative[i]
return S, V
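# Minimal usage sketch for KMG_analysis (synthetic data, illustration only).
def _example_kmg():
    events = np.array([1, 1, 0, 1])   # 1 = event observed, 0 = censored
    times = np.array([1.0, 2.5, 3.0, 4.2])
    S, V = KMG_analysis(events, times)
    return S, V  # arrays of length len(times) + 1, with S[0] = 1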
def distribution_alternatives(distribution, num_alter, overlap):
""" include "new" error function for cumulative distributions
Args:
------
distribution: list, array
Original data to compare to.
num_alter: int
Number of alternative distributions to generate
overlap: int, float
What fraction to (randomly) draw from original one.
"""
if overlap < 1:
# Assume that fraction between 0 and 1 was given
overlap = int(np.ceil(overlap*len(distribution)))
else:
# Assume that overlap was given as number of desired elements (not percentage!)
print("Overlap given will not be interpreted as percentage!")
overlap = int(overlap)
num_initial = len(distribution)
distribution_alternatives = np.zeros((overlap, num_alter))
    if distribution[0] == 0: # if (cumulative) distribution is sorted and starts from 0
for i in range(0, num_alter):
random_index = random.sample(range(1, num_initial), overlap-1)
random_index = np.append(random_index, 0)
random_index = np.sort(random_index)
distribution_alternatives[:,i] = distribution[random_index]
else:
for i in range(0, num_alter):
random_index = random.sample(range(0, num_initial), overlap)
random_index = np.sort(random_index)
distribution_alternatives[:,i] = distribution[random_index]
return distribution_alternatives
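# Usage sketch (illustrative only): draw 5 bootstrapped sub-distributions that
# each keep 80% of the points of a sorted cumulative distribution.
def _example_distribution_alternatives():
    cum_dist = np.sort(np.random.exponential(scale=100.0, size=200))
    variants = distribution_alternatives(cum_dist, num_alter=5, overlap=0.8)
    return variants  # shape (160, 5): one column per alternative distribution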
def distribution_compare(Cum_hist1,
Cum_hist2,
num_interpol=10000):
"""
    Function to compare two input distributions.
Args:
--------
Cum_hist1: list, array #TODO: check!
Cum_hist2: list, array #TODO: check!
num_interpol: int, optional
Number of interpolation bins. Default = 10000.
"""
y1 = 1/(len(Cum_hist1)-1) * np.arange(0, len(Cum_hist1) , 1)
y2 = 1/(len(Cum_hist2)-1) * np.arange(0, len(Cum_hist2) , 1)
fit1 = interp1d(Cum_hist1, y1, kind='nearest')
fit2 = interp1d(Cum_hist2, y2, kind='nearest')
xnew = np.linspace(0,min(max(Cum_hist1),max(Cum_hist2)), num=num_interpol) # only look at first 95% (ignore weird end)
return (np.mean((fit1(xnew) - fit2(xnew))**2))
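# Usage sketch (illustrative only): the return value is the mean squared
# difference between the two interpolated cumulative curves (0 = identical).
# A zero is appended so both curves cover the interpolation range from 0.
def _example_distribution_compare():
    dist1 = np.sort(np.append(np.random.exponential(scale=100.0, size=300), 0.0))
    dist2 = np.sort(np.append(np.random.exponential(scale=120.0, size=250), 0.0))
    return distribution_compare(dist1, dist2)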
def valid_EB_runs(simPa,
EB_comet_sum,
barrier_contact_times = []):
""" Function to select valid runs (runs longer than min_length_run).
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
EB_comet_sum: list
List containing EB counts in comet. #TODO: check
barrier_contact_times: list
List containing barrier contact times.
"""
# Select valid runs
b = []
if simPa.barrier: # if barrier present
for a in range(0, len(EB_comet_sum)):
b.append(len(EB_comet_sum[a]) * simPa.frame_rate_actual - barrier_contact_times[a])
else:
for a in range(0, len(EB_comet_sum)):
b.append(len(EB_comet_sum[a]) * simPa.frame_rate_actual)
valid_runs = np.where(np.array(b) > simPa.min_length_run)[0]
return valid_runs
def analyse_EB_signal(simPa,
EB_comet_sum,
barrier_contact_times):
""" Function to analyse EB signal
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
EB_comet_sum: list
List containing EB counts in comet. #TODO: check
barrier_contact_times: list
List containing barrier contact times.
"""
# Select valid runs
valid_runs = valid_EB_runs(simPa, EB_comet_sum, barrier_contact_times)
max_barrier_contact_frames = int(round(np.max(barrier_contact_times/simPa.frame_rate_actual),0))
min_length_run_frames = int(simPa.min_length_run/simPa.frame_rate_actual)
frame_window = min_length_run_frames + max_barrier_contact_frames
EB_signal = np.zeros((len(valid_runs), frame_window+1)) #simPa.min_length_run+1+max_barrier_contact)) #put individual runs into one np.array
normalize_EB_signal = np.zeros(frame_window+1) #simPa.min_length_run+1+max_barrier_contact)
for a in range(0,len(valid_runs)):
frame_barrier_contact = int(np.round(barrier_contact_times[valid_runs[a]]/simPa.frame_rate_actual,0))
EB_signal[a][(max_barrier_contact_frames-frame_barrier_contact):frame_window] \
= np.array(EB_comet_sum[valid_runs[a]])[0:(min_length_run_frames+frame_barrier_contact)]
normalize_EB_signal[(max_barrier_contact_frames-frame_barrier_contact):frame_window] +=1
EB_signal_average = np.sum(EB_signal, axis=0)
EB_signal_average = EB_signal_average/normalize_EB_signal
return EB_signal, EB_signal_average, max_barrier_contact_frames, min_length_run_frames, frame_window
def analyse_EB_profile(simPa,
MT_length_full,
EB_profiles,
w_size):
""" Calculate the mean GTP/GDP-Pi (or EB) profile at the microtubule end during steady-state growth.
    The resulting profile is not convolved with a Gaussian and thus represents the theoretical profile.
Args:
------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
TODO: add documentation
...
"""
# Initialize arays
v_mean = []
EB_mean = []
# Microscope parameters
resolution = 0.25 #um
# Number of dimers in PSF
resolution_dimers = int(np.ceil(resolution/simPa.dL_dimer))
# Loop over number of simulated microtubules
count = 0
for num_run in range(len(MT_length_full)):
# Obtain the time and length arrays
time = np.arange(0, len(MT_length_full[num_run]), 1) * simPa.frame_rate_actual
MT_length = (np.asarray(MT_length_full[num_run]) - MT_length_full[num_run][0]) * simPa.dL_dimer *1000
# Calculate mean growth speed
v = np.polyfit(time, MT_length, 1)
v_mean.append(v[0])
if simPa.steady_state_analysis:
# Find the local mean growth speeds in order to exclude pausing state from the profile analysis
if len(MT_length) > w_size:
di = 0
v_fit = np.zeros(len(MT_length) - w_size + 1)
for i in window(MT_length, w_size):
v_fit[di] = np.polyfit(np.linspace(0, simPa.frame_rate_actual*w_size-1, w_size), i, 1)[0]
di = di + 1
else: v_fit = []
# Set velocity threshold
v_thres = 0.6
# Identify steady-state growth events in the trace
matches = [i for i, x in enumerate(v_fit) if x > v_thres*v_mean[-1]]
matches = np.asarray(matches) + w_size//2
if matches.size > 0:
for mm in matches:
# Extend the EB profile array
EB_new = np.append(EB_profiles[num_run][mm], np.zeros(resolution_dimers))
if count > 0:
EB_mean = EB_mean*(count/(count+1)) + EB_new*(1/(count+1))
count += 1
else:
EB_mean = EB_new
count += 1
else: # Include the complete growth trajectory to calculate the mean profile
for mm in range(len(EB_profiles[num_run])-1):
# Extend the EB profile array
EB_new = np.append(EB_profiles[num_run][mm], np.zeros(resolution_dimers))
if count > 0:
EB_mean = EB_mean*(count/(count+1)) + EB_new*(1/(count+1))
count += 1
else:
EB_mean = EB_new
count += 1
return EB_mean, v_mean
# -----------------------------------------------------------------------------
# -------------------------- Small helper functions ---------------------------
# -----------------------------------------------------------------------------
def frange(start, stop, step):
""" Function as alternative for "range, since "range" does not support floats.
"""
i = start
while i < stop:
yield i
i += step
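# Example (illustrative): list(frange(0, 1, 0.25)) -> [0, 0.25, 0.5, 0.75]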
def list_dim(lst):
""" Function to return the dimension of a list (e.g. nested list).
"""
if not type(lst) == list:
return 0
return len(lst) + list_dim(lst[0])
# Define Gaussian distribution
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def gamma_cdf(x, n, r):
return gammainc(n, r*x)
def exp_cdf(x, k):
return 1 - np.exp(-k*x)
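# Usage sketch (illustrative only): fit a synthetic lifetime sample with the
# gamma and exponential CDFs above, mirroring how they are used for the
# simulated lifetime distributions further below.
def _example_cdf_fits():
    lifetimes = np.sort(np.random.gamma(shape=3.0, scale=50.0, size=500))
    y = np.linspace(0, 1, len(lifetimes))
    popt_gamma, _ = curve_fit(gamma_cdf, lifetimes, y, p0=(1, 1e-2))
    popt_exp, _ = curve_fit(exp_cdf, lifetimes, y, p0=(1e-2,))
    return popt_gamma, popt_exp  # (n, r) for gamma; (k,) for exponential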
# Define a sliding window
def window(seq, n):
""" Generater that returns a sliding window (of width n) over data from the iterable/
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
"""
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
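# Example (illustrative):
# list(window([1, 2, 3, 4, 5], 3)) -> [(1, 2, 3), (2, 3, 4), (3, 4, 5)]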
# -----------------------------------------------------------------------------
# ----------------------------- Figure functions ------------------------------
# -----------------------------------------------------------------------------
# Figure styles
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_family('sans-serif')
font.set_style('normal')
font.set_weight('light')
def fig_sim_verification(simPa,
file_figure,
num_fig,
MT_length_full,
cap_end,
d_steps=1):
""" Compare the fixed parameter v_g and D_tip with the simulated results.
d_steps = 2 # in time_steps
Args:
------
simPa: parameter set
Simulation parameters in "ParameterSet" format
file_figure: str
Folder for storing figures and data
num_fig: int
Figure number
MT_length_full: numpy array ? #TODO:check
...
cap_end
...
d_steps: ...
"""
# Calculate the growth fluctuations
v_fluc = []
c_fluc = []
# Remove cap=seed position
L_seed = int(np.ceil(simPa.tip_window/simPa.dL_dimer))
for i in range(len(cap_end)):
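        # NOTE: cap_temp aliases cap_end, so the del below trims cap_end in place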
cap_temp = cap_end
index = np.argmax(np.asarray(cap_end[i]) > L_seed)
del cap_temp[i][:index]
cap_end_ss = cap_temp
for i in range(len(MT_length_full)):
v_fluc.extend(np.diff(MT_length_full[i]))
c_fluc.extend(np.diff(cap_end_ss[i]))
sample_size = len(c_fluc)
c_fluc = np.sort(c_fluc)
index = np.argmax(np.asarray(c_fluc) > 0)
c_fluc = c_fluc[int(index):]
v_fluc = np.asarray(v_fluc)*(simPa.dL_dimer*1000)
c_fluc = np.asarray(c_fluc)*(simPa.dL_dimer*1000)
# Calculate the growth distribution based on the fixed parameters
mu = simPa.growth_rate_one*simPa.frame_rate_actual # mean growth rate in dimers/frame
sig = ((2*simPa.D_tip*(simPa.frame_rate_actual*d_steps))**0.5)
x = np.arange(mu-5*sig, mu+5*sig, 1)
G = gaussian(x, mu, sig)
G = G / np.sum(G) # normalize gaussian
# Plot the results
fig , (ax1, ax2) = plt.subplots(1,2, figsize=(12, 7))
ax1.hist(v_fluc, bins = 60, density = True, color = "skyblue", label = "simulated data")
ax1.plot(x, G, 'r', label = "theoretical distribution")
ax2.hist(c_fluc, bins = 60, density = True, color = "skyblue", label = "simulated data")
move = len(c_fluc)/sample_size
pause = 1 - move
step_mean = np.mean(c_fluc)
step_std = np.std(c_fluc)
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename + '.eps', format='eps', dpi=1000)
plt.savefig(filename + '.png', format='png', dpi=200)
plt.show()
print('Pausing probability: %.2f' %pause)
print('Step probability: %.2f' %float(1-pause))
print('Mean step size: %.1f +- %.1f nm' %(step_mean, step_std))
print('Mean pausing duration: %.2f sec ' %float(step_mean/(simPa.growth_speed*1000/60)))
def fig_cat_dist(simPa,
file_figure,
num_fig,
catastrophe_times,
Cum_dist_compare):
""" Catastrophe distribution compared to data
Args:
------
simPa: parameter set
Simulation parameters in "ParameterSet" format
file_figure: str
Folder for storing figures and data
num_fig: int
Figure number
catastrophe_times: numpy array
Array of catastrophe times
Cum_hist_compare: list, array #TODO: check
Cumulative catastrophe time distribution for comparison
"""
if not isinstance(catastrophe_times, np.ndarray):
print('Catastrophe times input format must be numpy array!')
catastrophe_times = np.zeros(0)
if catastrophe_times.shape[0] > 1:
tau_c = np.mean(catastrophe_times) #i*dt/len(catastrophe_times)
print('Mean catastrophe time: %.2f s' %tau_c)
n_bins = int(np.ceil(len(catastrophe_times)/10))
fig = plt.figure(3)
plt.clf()
## Compare to data
bins=np.histogram(np.hstack((Cum_dist_compare,catastrophe_times)), bins=n_bins)[1] #get the bin edges
Hist_exp, edges_exp = np.histogram(Cum_dist_compare, bins = bins)
bin_width = edges_exp[1]
plt.bar((edges_exp[:-1] + bin_width/2) , np.float_(Hist_exp)/(sum(Hist_exp)), bin_width, alpha=0.5, color='gray')
Hist, edges = np.histogram(catastrophe_times, bins = bins)
plt.plot((edges[1:] -edges[1]/2), np.float_(Hist)/(sum(Hist)),'r-', linewidth=1.0)
#plt.title('Catastrophe distribution')
plt.xlabel('time [s]')
plt.ylabel('fraction of event')
fig.suptitle('Catastrophe distribution', fontsize=14, fontweight='bold')
plt.ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.9)
#Add parameters to figure
    figtext = ['$v_{g} = %.2f \mu m/min$' %float(simPa.growth_rate_one*(60*simPa.dL_dimer))]
    figtext.append('$EB = %.2f \mu M$' %float(simPa.EB))
    figtext.append('$D_{tip} = %.2f nm^2/s$' %simPa.D_tip)
figtext.append('Cap unstable when in state "C" ')
figtext.append('in %r out of %r dimer layers.' %(int(simPa.unstable_cap_criteria-simPa.CAP_threshold),int(simPa.unstable_cap_criteria)))
figtext.append('Tip states:B->C with the rates:' )
figtext.append('$k_{hyd} = %.3f s^{-1}$' %(simPa.kBC ))
figtext.append('Results (n = %d) -------------------------------' %len(catastrophe_times))
figtext.append(r'$\tau_{C} = %.2f s$' %tau_c)
figDX = 0.045
for m in range(len(figtext)):
plt.ax.text(0.4, 0.9-m*figDX, figtext[m], fontproperties=font,
verticalalignment='bottom', horizontalalignment='left',
transform=plt.ax.transAxes, color='black', fontsize=8)
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
else:
print('No proper input found.')
def fig_cat_cumulative(simPa, file_figure, num_fig, Cum_dist, Cum_dist_compare = [0]):
""" Plot cumulative catastrophe distribution (or barrier contact time distribution).
Compare to (experimental) data if given.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
Cum_dist: list, array #TODO:check
Cumulative catastrophe time distribution.
Cum_hist_compare: list, array #TODO:check
Cumulative catastrophe time distribution for comparison.
"""
fig = plt.figure(1, figsize=(12, 7))
plt.clf()
# Check input cumulative distribution
if isinstance(Cum_dist, list) and list_dim(Cum_dist) > 1 and list_dim(Cum_dist[0]) > 0:
if isinstance(Cum_dist[0], np.ndarray):
print(list_dim(Cum_dist), ' different cumulative distributions found. ')
else:
print('Error: Input cumulative distributions must be numpy arrays or lists of numpy arrays.' )
elif isinstance(Cum_dist, list) and list_dim(Cum_dist) == 1 and isinstance(Cum_dist[0], np.ndarray):
        pass
elif isinstance(Cum_dist, np.ndarray):
Cum_dist = [Cum_dist] #put numpy array into list
else:
print('Error: Input cumulative distributions must be numpy arrays or lists of numpy arrays.' )
if len(Cum_dist_compare) > 1: # i.e.if comparison data is given
if isinstance(Cum_dist_compare, list):
if list_dim(Cum_dist_compare) == 1:
comparing_index = np.zeros(list_dim(Cum_dist))
elif list_dim(Cum_dist_compare) == list_dim(Cum_dist):
#Assume that one comparison distribution given for each Cum_dist + same ordering
print('Function assumes same pairing of distributions: 1-1, 2-2, ... ')
comparing_index = np.arange(0, list_dim(Cum_dist_compare))
else:
print('Error: Dimension of comparison distribution(s) does not match.' )
comparing_index = []
elif isinstance(Cum_dist_compare, np.ndarray):
Cum_dist_compare = [Cum_dist_compare]
comparing_index = np.zeros(list_dim(Cum_dist))
else:
print('Error: Input distributions must be numpy arrays or lists of numpy arrays.' )
comparing_index = []
if list_dim(Cum_dist) > 1:
c_range = 1/(list_dim(Cum_dist)-1)
else:
c_range = 1
print(c_range)
for i, Cum_dist in enumerate(Cum_dist):
print((0.95*(i+1)*c_range, 0.1, 0.1))
plt.step(Cum_dist, 1/(len(Cum_dist)-1) * np.arange(0, len(Cum_dist) , 1),
where='post', color=(0.95-0.7*(i)*c_range, 0.1, 0.1 + 0.8*(i)*c_range), linewidth=1.5, label='model results')
if len(Cum_dist_compare) > 1:
Cum_dist_compare_selected = Cum_dist_compare[int(comparing_index[i])]
#generate and draw distributions of same length as experimental data
print(Cum_dist_compare_selected.shape)
print(comparing_index)
overlap = Cum_dist_compare_selected.shape[0]
print('overlap: ', overlap)
num_distributions = 100
if overlap < len(Cum_dist): #needed: more simulation data points than experimental ones
Cum_dist_variants = distribution_alternatives(Cum_dist, num_distributions, overlap)
for m in range(0, num_distributions):
plt.step(Cum_dist_variants[:,m], 1/(overlap-1) * np.arange(0, overlap , 1),
where='post', color=(0.95-0.7*(i)*c_range, 0.3, 0.1 +0.8*(i)*c_range), alpha=0.25, linewidth=1.0)
plt.step(Cum_dist_compare_selected, 1/(len(Cum_dist_compare_selected)-1) * np.arange(0, len(Cum_dist_compare_selected) , 1),
where='post', color='black', linewidth=1.5, label='experimental data')
if simPa.barrier:
plt.title('Cumulative contact-time distribution')
else:
plt.title('Cumulative catastrophe time distribution')
plt.xlabel('time [s]')
plt.legend(fontsize=14)
plt.ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.9)
figtext = ['$v_{g} = %.2f \mu m/min$' %float(simPa.growth_rate_one*(60*simPa.dL_dimer))]
figtext.append('$EB = %.2f \mu M$' %float(simPa.EB))
    figtext.append('$D_{tip} = %.2f nm^2/s$' %simPa.D_tip)
figtext.append('Cap unstable when in state "C" ')
figtext.append('in %r out of %r dimer layers.' %(int(simPa.unstable_cap_criteria-simPa.CAP_threshold),int(simPa.unstable_cap_criteria)))
figtext.append('Tip states: B->C with the rates:' )
figtext.append('$k_{hyd} = %.3f s^{-1}$' %simPa.kBC)
figtext.append('dt = %.2f s || V = %.2f um/s' %(simPa.dt, simPa.growth_rate_one*60*simPa.dL_dimer))
figtext.append('actual frame rate = %.2f /s' %simPa.frame_rate_actual)
figDX = 0.045
for m in range(len(figtext)):
plt.ax.text(0.6, 0.65-m*figDX, figtext[m], fontproperties=font,
verticalalignment='bottom', horizontalalignment='left',
transform=plt.ax.transAxes, color='black', fontsize=10)
if simPa.record_data:
file_figure = file_figure + '_fig' + str(int(num_fig))
plt.savefig(file_figure +'.pdf', format='pdf', dpi=1000) #, transparent=True)
plt.savefig(file_figure +'.png', format='png', dpi=200) #, transparent=True )
file_csv = file_figure[:-10] + "EB" + str(simPa.EB*1000)[:-2] + "_" + str(simPa.kBC) + "_" + str(simPa.D_tip) + ".csv"
Cum_dist_pd = pd.DataFrame(np.round(Cum_dist,2))
Cum_dist_pd.to_csv(file_csv, header=None, index=None)
plt.show()
def fig_EB_at_barrier(simPa,
file_figure,
num_fig,
EB_comet_sum,
barrier_contact_times):
""" Plot EB intensity (here = elements in state "B") before and at barrier contact.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
EB_comet_sum: list, array #TODO:check
Number of "B"s during a time window before catastophe.
barrier_contact_times: list, array #TODO:check
List/array containing barrier contact times.
"""
# Select valid runs
valid_runs = valid_EB_runs(simPa, EB_comet_sum, barrier_contact_times)
EB_signal, EB_signal_average, max_barrier_contact_frames, min_length_run_frames, frame_window = analyse_EB_signal(simPa,
EB_comet_sum, barrier_contact_times)
def func(x, a, b, c):
return a * np.exp(-b * x) + c
xdata = simPa.frame_rate_actual * np.arange(0,int(max_barrier_contact_frames/2.5))
ydata = EB_signal_average[(max_barrier_contact_frames-int(max_barrier_contact_frames/2.5)):max_barrier_contact_frames][::-1]
# Make fit:
popt, pcov = curve_fit(func, xdata, ydata, p0=(np.max(ydata), simPa.kBC, 1),maxfev=1000)
norm = np.mean(EB_signal_average[-50:-2])
# Start plotting
if num_fig != 0:
plt.figure(num_fig)
plt.clf()
for a in range(0,len(valid_runs)):
plt.plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames+1,max_barrier_contact_frames+1),
EB_signal[a][0:frame_window][::-1]/norm,color=plt.cm.Reds(0.3+0.7*a/len(valid_runs)))
plt.plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames+1,max_barrier_contact_frames+1),
EB_signal_average[0:frame_window][::-1]/norm,'black', linewidth=3.0)
plt.title("number of B's")
plt.xlabel('time before barrier contact [s]');
plt.ylabel('EB comet intensity');
plt.xlim(-10, 20)
print(popt)
plt.plot(xdata, func(xdata, *popt)/norm, 'c--',)
plt.text(8,1.5*max(ydata/norm),'decay rate (exp. fit): %.2f' %popt[1])
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
return popt[1]
def fig_EB_before_cat(simPa, file_figure, num_fig, EB_comet_sum, barrier_contact_times=[]):
""" Plot EB intensity (here = elements in state "B") before catastrophe.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
EB_comet_sum: list, array #TODO:check
Number of "B"s during a time window before catastophe.
barrier_contact_times: list, array #TODO:check
List/array containing barrier contact times.
Needed if barrier not False
"""
EB_output = []
min_length_run_frames = int(simPa.min_length_run/simPa.frame_rate_actual)
# Select valid runs
valid_runs = valid_EB_runs(simPa, EB_comet_sum, barrier_contact_times)
EB_signal_before_cat = np.zeros((len(valid_runs), min_length_run_frames+1)) # put individual runs into one np.array
for a in range(0,len(valid_runs)):
EB_signal_before_cat[a][0:min_length_run_frames] = \
np.array(EB_comet_sum[valid_runs[a]])[0:min_length_run_frames]
EB_signal_mean = np.mean(EB_signal_before_cat, axis=0)
EB_signal_std = np.std(EB_signal_before_cat, axis=0)
# 95% confidence intervals
CI_upper = EB_signal_mean + 1.96*(EB_signal_std/np.sqrt(len(valid_runs)))
CI_lower = EB_signal_mean - 1.96*(EB_signal_std/np.sqrt(len(valid_runs)))
# Normalize the EB signal
norm = np.mean(EB_signal_mean[-50:])
# Combine time and mean EB value in single output array
EB_output = np.vstack((CI_upper,CI_lower))
EB_output = np.vstack((EB_signal_mean, EB_output))
EB_output = np.flip(np.delete(EB_output,-1,1), 1)/norm
EB_output = np.vstack((simPa.frame_rate_actual*np.arange(-min_length_run_frames,0),EB_output)).T
# Start plotting
if num_fig != 0:
plt.figure(num_fig)
plt.clf()
plt.plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames,0),
EB_signal_mean[0:min_length_run_frames][::-1]/norm,'red', linewidth=2.0)
for a in range(0,len(valid_runs)):
plt.plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames,0),
EB_signal_before_cat[a][0:min_length_run_frames][::-1]/norm,color=plt.cm.Reds(0.3+0.7*a/len(valid_runs)))
plt.plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames,0),
EB_signal_mean[0:min_length_run_frames][::-1]/norm,'black', linewidth=3.0)
plt.title("Mean GTP/GDP-Pi prior to catastrophe")
plt.xlabel('time before catastrophe [s]');
plt.ylabel('GTP/GDP-Pi');
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
return EB_output
def fig_MT_before_cat(simPa,
file_figure,
num_fig,
MT_length_sum,
cap_end_sum):
""" Figure showing tip and cap position before catastrope.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
MT_length_sum:
TODO
cap_end_sum:
TODO
"""
# Remove invalid growth events from dataset
index = np.sum(MT_length_sum, axis=1)
index = np.where(index == 0)[0]
MT_length = np.delete(MT_length_sum, index, 0)
C_length = np.delete(cap_end_sum, index, 0)
# Normalize length to max position
MT_length_norm = (MT_length.T - np.max(MT_length, axis=1)).T
C_length_norm = (C_length.T - np.max(C_length, axis=1)).T
# Calculate mean and error MT tip position
MT_length_mean = np.mean(MT_length_norm, axis=0)*(simPa.dL_dimer*1000)
MT_length_mean = MT_length_mean + abs(np.max(MT_length_mean))
MT_length_std = np.std(MT_length_norm, axis=0)
# Calculate mean and error cap end position
C_length_mean = np.mean(C_length_norm, axis=0)*(simPa.dL_dimer*1000)
C_length_mean = C_length_mean + abs(np.max(C_length_mean))
C_length_std = np.std(C_length_norm, axis=0)
# Ensure that tip and cap coincide at moment of catastrophe
C_length_mean = C_length_mean + MT_length_mean[-1]
# Calculate 95% confidence intervals of tip position
MT_CI_upper = MT_length_mean + (1.96 * (MT_length_std/np.sqrt(MT_length.shape[0])))
MT_CI_lower = MT_length_mean - (1.96 * (MT_length_std/np.sqrt(MT_length.shape[0])))
# MT_CI_upper = MT_length_mean + MT_length_std
# MT_CI_lower = MT_length_mean - MT_length_std
# Calculate 95% confidence intervals of cap end position
C_CI_upper = C_length_mean + (1.96 * (C_length_std/np.sqrt(C_length.shape[0])))
C_CI_lower = C_length_mean - (1.96 * (C_length_std/np.sqrt(C_length.shape[0])))
# C_CI_upper = C_length_mean + C_length_std
# C_CI_lower = C_length_mean - C_length_std
# Calculate pausing duration prior to catastrophe (def: < 10% mean growth speed)
MT_length_diff = np.diff(MT_length_mean) / simPa.frame_rate_actual
Pausing = np.where(MT_length_diff < 0.1*simPa.growth_speed*1000/60)[0][0]
Pausing = (MT_length.shape[1] - Pausing)*simPa.frame_rate_actual
# Tip position output
MT_length_output = np.vstack((MT_CI_upper, MT_CI_lower))
MT_length_output = np.vstack((MT_length_mean, MT_length_output))
MT_length_output = np.vstack((simPa.frame_rate_actual * np.arange(-len(MT_length_mean),0), MT_length_output)).T
# Cap position output
C_length_output = np.vstack((C_CI_upper, C_CI_lower))
C_length_output = np.vstack((C_length_mean, C_length_output))
C_length_output = np.vstack((simPa.frame_rate_actual * np.arange(-len(C_length_mean),0), C_length_output)).T
#Start plotting
if num_fig != 0:
plt.figure(num_fig)
plt.clf()
# Plot mean tip position
plt.plot(simPa.frame_rate_actual * np.arange(-len(MT_length_mean),0),
MT_length_mean,'k', linewidth=2.0)
# Plot upper 95% confidence interval of tip position
plt.plot(simPa.frame_rate_actual * np.arange(-len(MT_length_mean),0),
MT_CI_upper,'k--', linewidth=1.0)
# Plot lower 95% confidence interval of tip position
plt.plot(simPa.frame_rate_actual * np.arange(-len(MT_length_mean),0),
MT_CI_lower,'k--', linewidth=1.0)
# Plot mean cap position
plt.plot(simPa.frame_rate_actual * np.arange(-len(C_length_mean),0),
C_length_mean,'r', linewidth=2.0)
# Plot upper 95% confidence interval of cap position
plt.plot(simPa.frame_rate_actual * np.arange(-len(C_length_mean),0),
C_CI_upper,'r--', linewidth=1.0)
# Plot lower 95% confidence interval of cap position
plt.plot(simPa.frame_rate_actual * np.arange(-len(C_length_mean),0),
C_CI_lower,'r--', linewidth=1.0)
plt.title("MT length prior to catastrophe")
plt.xlabel('time before catastrophe [s]');
plt.ylabel('Mean MT position [nm]');
#Add parameters to figure
figtext = ['Shrinkage = %.2f nm' %abs(MT_length_mean[-1])]
figtext.append('Pausing = %.2f s' %Pausing)
## Figure styles
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_family('sans-serif')
font.set_style('normal')
font.set_weight('light')
        plt.ax = plt.gca()  # bind plt.ax to the current axes (not otherwise set in this function)
        figDX = 0.045
        for m in range(len(figtext)):
            plt.ax.text(0.1, 0.82-m*figDX, figtext[m], fontproperties=font,
                        verticalalignment='bottom', horizontalalignment='left',
                        transform=plt.ax.transAxes, color='black', fontsize=11)
plt.xlim(-30, 2)
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
print('Mean shrinkage before catastrophe = %.2f nm' %abs(MT_length_mean[-1]))
print('Mean pause duration before catastrophe = %.2f s' %Pausing)
return MT_length_output, C_length_output
def fig_EB_cat_hist(simPa,
file_figure,
num_fig,
EB_comet_sum,
barrier_contact_times,
EB_average_frames = 2):
""" Have a look at EB intensity at catastrophe...
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
EB_comet_sum: list, array #TODO:check
Number of "B"s during a time window before catastophe.
barrier_contact_times: list, array #TODO:check
List/array containing barrier contact times.
EB_average_frames: int
Number of frames to average over. Default = 2.
"""
EB_intensity_before_cat = []
EB_intensity_at_barrier = []
EB_mean = []
# Select valid runs
valid_runs = valid_EB_runs(simPa, EB_comet_sum, barrier_contact_times)
EB_signal, EB_signal_average, max_barrier_contact_frames, min_length_run_frames, frame_window = analyse_EB_signal(simPa,
EB_comet_sum, barrier_contact_times)
for a in range(0,len(valid_runs)):
EB_intensity_before_cat.append(np.mean(np.array(EB_comet_sum[a])[0:(EB_average_frames+1)])) # :-1]))
barrier_contact_frame = int(round(barrier_contact_times[valid_runs[a]]/simPa.frame_rate_actual,0))
EB_intensity_at_barrier.append(np.mean(np.array(EB_comet_sum[a])[barrier_contact_frame:(barrier_contact_frame+EB_average_frames+1)]))
EB_mean.append(np.mean(EB_signal[a][max_barrier_contact_frames:frame_window]))
fig, ax = plt.subplots(figsize=(8, 8)) #figure(9, figsize=(8, 8))
plt.clf()
map = plt.scatter(EB_intensity_before_cat/np.mean(EB_mean), EB_intensity_at_barrier/np.mean(EB_mean),
c = barrier_contact_times[valid_runs], alpha=0.5, cmap='CMRmap')
plt.xlim(xmax=1)
fig.colorbar(map, ax = ax, label = 'barrier contact time [s]')
plt.title('EB intensity before catastrophe (%.2f nM EB)' %(simPa.EB*1000))
plt.xlabel('EB intensity right before catastrophe (last %.0f frames), relative to mean' %EB_average_frames)
plt.ylabel('EB intensity right before barrier contact, relative to mean')
plt.legend(fontsize=14)
if simPa.record_data:
filename = file_figure + '_fig' + str(num_fig) + '_relative'
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
fig, ax = plt.subplots(figsize=(8, 8)) #figure(9, figsize=(8, 8))
plt.clf()
hist_data, hist_bins = np.histogram(EB_intensity_before_cat/np.mean(EB_mean), np.arange(0,1.1,0.1))
bin_width = hist_bins[1]
plt.bar((hist_bins[:-1] + bin_width/2) , np.float_(hist_data)/(np.sum(hist_data)), 0.9*bin_width, alpha=0.8)
plt.title('Relative B-state intensity at catastrophe')
plt.xlabel('Relative B-state intensity (#of elements in state "B" div. by mean)')
plt.ylabel('Probability')
if simPa.record_data:
filename = file_figure + '_fig' + str(num_fig) + '_histogram'
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
#PROBLEM with plt.hist --> normed=1 and density=1 don't work properly
#plt.hist(EB_intensity_before_cat/np.mean(EB_mean), np.arange(0,1.1,0.1), density=True, histtype='bar', rwidth=0.8)
plt.show()
return EB_intensity_before_cat/np.mean(EB_mean)
def fig_display_examples(simPa,
file_figure,
num_fig,
MT_length_sum,
catastrophe_times,
EB_comet_sum,
barrier_contact_times=[]):
""" Show selection of examples (tip position + EB intensity)
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
MT_length_sum:
TODO
catastrophe_times:
TODO
EB_comet_sum: list, array #TODO:check
Number of "B"s during a time window before catastophe.
barrier_contact_times: list, array #TODO:check
List/array containing barrier contact times.
"""
min_length_run_frames = int(simPa.min_length_run/simPa.frame_rate_actual)
# Select valid runs
valid_runs = valid_EB_runs(simPa, EB_comet_sum, barrier_contact_times)
EB_signal_before_cat = np.zeros((len(valid_runs), min_length_run_frames+1)) #put individual runs into one np.array
for a in range(0,len(valid_runs)):
EB_signal_before_cat[a][0:min_length_run_frames] = \
np.array(EB_comet_sum[valid_runs[a]])[0:min_length_run_frames]
EB_signal_average = np.sum(EB_signal_before_cat, axis=0)
EB_signal_average = EB_signal_average/len(valid_runs)
show_fraction_frames = int(simPa.show_fraction/simPa.frame_rate_actual)
valid_runs = np.where(catastrophe_times > simPa.show_fraction)[0]
plt.figure(num_fig, figsize=(15, 10))
plt.clf()
f, axarr = plt.subplots(nrows=5, ncols=5, sharey=True, sharex=True, figsize=(15, 10))
for m in range(0,5):
for n in range(0,5):
skip = 0
axarr[m, n].plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames,0) ,MT_length_sum[valid_runs[skip+m+5*n]][0::], 'black')
axarr[m, n].set_title('catastrophe %.0f' %(skip+m+5*n))
            axarr[m, n].plot(simPa.frame_rate_actual * np.arange(-min_length_run_frames,0),
                             EB_signal_before_cat[skip+m+5*n][0:show_fraction_frames][::-1], 'red')
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
def fig_EB_profile(simPa, file_figure, num_fig, EB_profiles, MT_length_full, w_size):
""" Figure to display EB profile.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
EB_profiles:
TODO
MT_length_full:
TODO
w_size:
TODO
"""
# Analyse the simulated EB profiles
EB_mean, v_mean = analyse_EB_profile(simPa, MT_length_full, EB_profiles, w_size)
# Calculate the mean EB profile
# x = np.arange(0, len(EB_mean[0]), 1) * simPa.dL_dimer *1000
# y = np.mean(EB_mean, axis=0)
x = np.arange(0, len(EB_mean), 1) * simPa.dL_dimer *1000
y = EB_mean
#plt.plot(x, y)
# Define exponential function
    def exponential_func(x, a, b):
        return a*np.exp(b*x)
    # Calculate the maturation rate (Duellberg, 2016), i.e. the hydrolysis rate
    ind = np.argmax(y)
    popt, pcov = curve_fit(exponential_func, x[0:ind], y[0:ind], p0=(1e-2, 1e-3))
    xx = np.linspace(0, x[ind], 1000)
    yy = exponential_func(xx, *popt)
# Align profile from left to right and set tip position to zero
x = -x
x += np.round(np.argmax(y)*(simPa.dL_dimer*1000))
xx = -xx
xx += np.round(np.argmax(y)*(simPa.dL_dimer*1000))
fig = plt.figure(1, figsize=(12, 7))
plt.clf()
plt.plot(x,y,'k.', xx, yy, '--r')
plt.title('Mean GTP/GDP-Pi profile', fontsize=14)
plt.xlabel('Position [nm]', fontsize=12)
plt.ylabel('Intensity [a.u.]', fontsize = 12)
plt.ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.9)
## Figure styles
from matplotlib.font_manager import FontProperties
font = FontProperties()
font.set_family('sans-serif')
font.set_style('normal')
font.set_weight('light')
#Add parameters to figure
figtext = ['Simulation parameters:']
figtext.append('$N_{sim} = %.0f$' %len(MT_length_full))
figtext.append('$v_{g} = %.2f$ $nm/s$' %float(simPa.growth_rate_one*(simPa.dL_dimer*1000)))
figtext.append('$k_{hyd} = %.2f$ $s^{-1}$' %(simPa.kBC))
figtext.append('$D_{tip} = %.0f$ $nm^{2}/s$' %simPa.D_tip)
figtext.append('')
figtext.append('Measured values:')
figtext.append('$N_{profiles} = %.0f$' %len(EB_mean))
figtext.append('$v_{g} = %.2f$ $nm/s$' %np.mean(v_mean))
figtext.append('$L_{comet} = %.0f$ $nm$' %float(1/popt[1]))
figtext.append('$k_{m} = %.2f$ $s^{-1}$' %float(np.mean(v_mean)*popt[1]))
figtext.append('$I_{max} = %.2f$ $a.u.$' %float(np.max(y)))
figDX = 0.045
for m in range(len(figtext)):
plt.ax.text(0.75, 0.9-m*figDX, figtext[m], fontproperties=font,
verticalalignment='bottom', horizontalalignment='left',
transform=plt.ax.transAxes, color='black', fontsize=12)
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
def fig_MT_ageing(simPa, file_figure, num_fig, c_times):
""" Calculate the age-dependent microtubule catastrophe frequency
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
c_times: numpy.array
Array of catastrophe times.
"""
X_dist = np.sort(c_times)
Y_dist = np.cumsum(np.ones(len(c_times)))/len(c_times)
f = (Y_dist/X_dist)/(1-Y_dist)
C_freq = np.vstack((X_dist,Y_dist)).T
plt.figure(1, figsize=(12, 7))
plt.clf()
plt.plot(X_dist, f)
plt.ylim(0, 0.02)
plt.xlim(0, 400)
plt.title('Microtubule ageing', fontsize=14)
plt.xlabel('Microtubule age [s]', fontsize=12)
plt.ylabel('Catastrophe frequency [$s^{-1}$]', fontsize = 12)
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
return C_freq
def fig_dist_fit(simPa,
file_figure,
num_fig,
Cum_dist):
"""
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
Cum_dist:
TODO
"""
fig, ax1 = plt.subplots(figsize=(12, 7))
plt.clf()
if isinstance(Cum_dist, list) and list_dim(Cum_dist) > 1 and list_dim(Cum_dist[0]) > 0:
if isinstance(Cum_dist[0], np.ndarray):
print(list_dim(Cum_dist), ' different cumulative distributions found. ')
else:
print('Error: Input cumulative distributions must be numpy arrays or lists of numpy arrays.' )
elif isinstance(Cum_dist, list) and list_dim(Cum_dist) == 1 and isinstance(Cum_dist[0], np.ndarray):
        pass
elif isinstance(Cum_dist, np.ndarray):
Cum_dist = [Cum_dist] #put numpy array into list
else:
print('Error: Input cumulative distributions must be numpy arrays or lists of numpy arrays.' )
x = Cum_dist[0]
x_fit = np.linspace(0,x[-1],1000)
y = np.linspace(0,1,len(x))
# Fit cumulative distribution to the Gamma function
popt1, pcov1 = curve_fit(gamma_cdf,x, y, p0=(1, 1e-2))
y1 = gamma_cdf(x_fit, *popt1)
print(popt1)
print(pcov1)
# Fit cumulative distribution to an exponential
popt2, pcov2 = curve_fit(exp_cdf, x, y, p0=(1e-2))
y2 = exp_cdf(x_fit, *popt2)
if list_dim(Cum_dist) > 1:
c_range = 1/(list_dim(Cum_dist)-1)
else:
c_range = 1
for i, Cum_dist in enumerate(Cum_dist):
plt.step(Cum_dist, 1/(len(Cum_dist)-1) * np.arange(0, len(Cum_dist) , 1),
where='post', color=(0.95-0.7*(i)*c_range, 0.1, 0.1 + 0.8*(i)*c_range), linewidth=1.5, label='Simulation')
plt.plot(x_fit, y1, 'k--', linewidth=1.5, label='Gamma fit')
plt.plot(x_fit, y2, 'k:', linewidth=1.5, label='Exponential fit')
plt.title('Microtubule lifetime distribution', fontsize=14)
plt.xlabel('time [s]')
plt.ylabel('Cumulative fraction')
plt.ax = fig.add_subplot(111)
figtext = ['Simulation parameters:']
figtext.append('$v_{g} = %.1f$ $nm/s$' %float(simPa.growth_rate_one*(simPa.dL_dimer*1000)))
figtext.append('$k_{hyd} = %.2f$ $s^{-1}$' %(simPa.kBC))
if simPa.D_tip_time:
figtext.append('$k_{ageing} = %.3f$ $s^{-1}$' %(simPa.D_tip_rate_T))
elif simPa.D_tip_length:
figtext.append('$k_{ageing} = %.3f$ $s^{-1}$' %(simPa.D_tip_rate_L))
figtext.append('Gamma fit parameters:')
figtext.append('$steps = %.2f$' %popt1[0])
figtext.append('$rate = %.3f$ $s^{-1}$' %popt1[1])
figDX = 0.045
for m in range(len(figtext)):
plt.ax.text(0.7, 0.82-m*figDX, figtext[m], fontproperties=font,
verticalalignment='bottom', horizontalalignment='left',
transform=plt.ax.transAxes, color='black', fontsize=11)
plt.legend(fontsize=12)
# Plot insert with microtubule ageing parameters
left, bottom, width, height = [0.6, 0.21, 0.25, 0.25]
ax2 = fig.add_axes([left, bottom, width, height])
# Calculate the evolution of tip diffusion
D_tip = []
if simPa.D_tip_time:
for i in range(len(x_fit)):
D_tip.append((simPa.D_tip_end - simPa.D_tip_start) * (1 - np.exp(-1*simPa.D_tip_rate_T*(x_fit[i]))) + simPa.D_tip_start)
elif simPa.D_tip_length:
x_fit = np.linspace(0,50000,50000)*simPa.dL_dimer
for i in range(len(x_fit)):
D_tip.append((simPa.D_tip_end - simPa.D_tip_start) * (1 - np.exp(-1*simPa.D_tip_rate_L*(x_fit[i]))) + simPa.D_tip_start)
else:
D_tip = np.ones(len(x_fit))*simPa.D_tip
# Plot tip diffusion
ax2.plot(x_fit, D_tip, 'k')
ax2.set_ylim(0, 1.1*np.max(D_tip))
if simPa.D_tip_time:
ax2.set_xlabel('time [s]')
elif simPa.D_tip_length:
ax2.set_xlabel('length [um]')
ax2.set_ylabel('Tip diffusion [$nm^{2}s^{-1}$]')
ax2.set_title('Microtubule ageing')
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
plt.show()
def fig_nucleation(simPa,
file_figure,
num_fig,
nucleation_times):
""" Figure showing nucleation time histogram.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
nucleation_times: list, array
List, array of nucleation times.
"""
plt.subplots(figsize=(12, 7))
plt.clf()
plt.hist(nucleation_times, bins='auto')
plt.xlabel('time [s]')
plt.ylabel('Counts')
plt.title('Nucleation time', fontsize=14)
plt.show()
def fig_washout(simPa,
file_figure,
num_fig,
washout_times,
catastrophe_washout,
MT_length_sum):
""" Figure showing MT behavior for washout experiment.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
washout_times: numpy.array, list
List/array of washout time points.
catastrophe_washout:
        Indices of runs that underwent catastrophe after washout. #TODO: check
    MT_length_sum:
        MT tip positions per run and time step, in dimer units. #TODO: check
"""
# Plot the washout delay times
fig, axs = plt.subplots(1,3, figsize=(14,7))
axs[0].hist(washout_times, bins='auto')
axs[0].set_xlabel('delay time [s]')
axs[0].set_ylabel('counts')
axs[0].set_xlim(left=0)
# Calculate the shrinkage length
shrinkage_length = []
delay_steps = np.round(washout_times / simPa.dt)
delay_steps = delay_steps.astype(np.int16)
for i in range(len(catastrophe_washout)):
shrinkage_length.append((MT_length_sum[catastrophe_washout[i],-delay_steps[i]] - MT_length_sum[catastrophe_washout[i],-1])*simPa.dL_dimer*1000)
# Plot the shrinkage length
axs[1].hist(shrinkage_length, bins='auto')
axs[1].set_xlabel('shrinkage length [nm]')
axs[1].set_ylabel('counts')
axs[1].set_xlim(left=0)
# Calculate the shrinkage speeds
axs[2].hist(shrinkage_length/washout_times,bins='auto')
# Plot the shrinkage speed
axs[2].set_xlabel('shrinkage speed [nm/s]')
axs[2].set_ylabel('counts')
if simPa.record_data:
filename = file_figure + '_fig' + str(int(num_fig))
plt.savefig(filename+'.eps', format='eps', dpi=1000)
plt.savefig(filename+'.png', format='png', dpi=200)
def fig_cap_size_before_cat(simPa,
file_figure,
num_fig,
MT_length_full,
cap_end):
""" Figure showing cap size before catastrophe.
Args:
-------
simPa: parameter set
Simulation parameters in "ParameterSet" format.
file_figure: str
Folder for storing figures and data.
num_fig: int
Figure number.
MT_length_full:
TODO
cap_end:
TODO
"""
min_length_run_frames = int(simPa.min_length_run/simPa.frame_rate_actual)
MT_run_length = []
for i in range(len(cap_end)):
MT_run_length.append(len(cap_end[i]))
valid_runs = np.where(np.array(MT_run_length) > min_length_run_frames)[0]
cap_before_cat = np.zeros((len(valid_runs), min_length_run_frames+1)) #put individual runs into one np.array
for a in range(0,len(valid_runs)):
cap_before_cat[a][0:min_length_run_frames] = \
            np.array(MT_length_full[valid_runs[a]])[-min_length_run_frames:] - np.array(cap_end[valid_runs[a]])[-min_length_run_frames:]
# -*- coding: utf-8 -*-
"""
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
@author: <NAME> (sghosh) (started Feb 2018)
@author: <NAME> (alepros), Technical University of Denmark
"""
from time import time
import numpy as np
from numpy import flatnonzero as find, pi, exp
from pandapower import LoadflowNotConverged
from pandapower.pypower.pfsoln import pfsoln
from pandapower.pypower.idx_gen import PG, QG
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.makeYbus import makeYbus
from pandapower.pypower.idx_bus import GS, BS, PD , QD
from pandapower.auxiliary import _sum_by_group, _check_if_numba_is_installed,\
_check_bus_index_and_print_warning_if_high,\
_check_gen_index_and_print_warning_if_high, \
_add_pf_options, _add_ppc_options, _clean_up, sequence_to_phase, \
phase_to_sequence, X012_to_X0, X012_to_X2, \
I1_from_V012, S_from_VI_elementwise, V1_from_ppc, V_from_I,\
combine_X012, I0_from_V012, I2_from_V012, ppException
from pandapower.pf.run_newton_raphson_pf import _run_newton_raphson_pf
from pandapower.build_bus import _add_ext_grid_sc_impedance
from pandapower.pypower.bustypes import bustypes
from pandapower.run import _passed_runpp_parameters
from pandapower.pypower.idx_bus import VM, VA
from pandapower.pypower.idx_gen import GEN_BUS, GEN_STATUS, VG
from pandapower.results import _copy_results_ppci_to_ppc, _extract_results_3ph,\
init_results
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
class Not_implemented(ppException):
"""
Exception being raised in case loadflow did not converge.
"""
pass
def _get_pf_variables_from_ppci(ppci):
"""
    Used for getting values for the pfsoln function in one convenient function
"""
# default arguments
if ppci is None:
        raise ValueError('ppci is empty')
# get data for calc
base_mva, bus, gen, branch = \
ppci["baseMVA"], ppci["bus"], ppci["gen"], ppci["branch"]
# get bus index lists of each type of bus
ref, pv, pq = bustypes(bus, gen)
# generator info
on = find(gen[:, GEN_STATUS] > 0) # which generators are on?
gbus = gen[on, GEN_BUS].astype(int) # what buses are they at?
# initial state
v0 = bus[:, VM] * exp(1j * pi / 180 * bus[:, VA])
v0[gbus] = gen[on, VG] / abs(v0[gbus]) * v0[gbus]
ref_gens = ppci["internal"]["ref_gens"]
return base_mva, bus, gen, branch, ref, pv, pq, on, gbus, v0, ref_gens
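# Usage sketch (illustrative; ppci is produced by pandapower's pd2ppc conversion):
# base_mva, bus, gen, branch, ref, pv, pq, on, gbus, v0, ref_gens = \
#     _get_pf_variables_from_ppci(ppci)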
def _store_results_from_pf_in_ppci(ppci, bus, gen, branch):
ppci["bus"], ppci["gen"], ppci["branch"] = bus, gen, branch
return ppci
def _get_elements(params,net,element,phase,typ):
sign = -1 if element.endswith("sgen") else 1
elm = net[element].values
    # Find the column indices so numpy boolean filters can be applied to active loads
scaling = net[element].columns.get_loc("scaling")
typ_col = net[element].columns.get_loc("type") # Type = Delta or Wye load
# active wye or active delta row selection
active = (net["_is_elements"][element]) & (elm[:,typ_col] == typ)
bus = [net[element].columns.get_loc("bus")]
if len(elm):
if element == 'load' or element == 'sgen':
vl = elm[active,scaling].ravel()
p_mw = [net[element].columns.get_loc("p_mw")]
q_mvar = [net[element].columns.get_loc("q_mvar")]
params['p'+phase+typ] = np.hstack([params['p'+phase+typ],
elm[active,p_mw]/3 * vl * sign])
params['q'+phase+typ] = np.hstack([params['q'+phase+typ],
(elm[active,q_mvar]/3) * vl * sign])
params['b'+typ] = np.hstack([params['b'+typ],
elm[active,bus].astype(int)])
elif element.startswith('asymmetric'):
vl = elm[active,scaling].ravel()
p = {'a': net[element].columns.get_loc("p_a_mw")
,'b': net[element].columns.get_loc("p_b_mw")
,'c': net[element].columns.get_loc("p_c_mw")
}
q = {'a' : net[element].columns.get_loc("q_a_mvar")
,'b' : net[element].columns.get_loc("q_b_mvar")
,'c' : net[element].columns.get_loc("q_c_mvar")
}
params['p'+phase+typ] = np.hstack([params['p'+phase+typ],
elm[active,p[phase]] * vl * sign])
params['q'+phase+typ] = np.hstack([params['q'+phase+typ],
elm[active,q[phase]] * vl * sign])
params['b'+typ] = np.hstack([params['b'+typ],
elm[active,bus].astype(int)])
return params
def _load_mapping(net, ppci1):
"""
Takes three phase P, Q values from PQ elements
sums them up for each bus
maps them in ppc bus order and forms s_abc matrix
"""
bus_lookup = net["_pd2ppc_lookups"]["bus"]
params = dict()
phases = ['a', 'b', 'c']
load_types = ['wye', 'delta']
load_elements = ['load', 'asymmetric_load', 'sgen', 'asymmetric_sgen']
# =============================================================================
# Loop to initialize and feed s_abc wye and delta values
# =============================================================================
for phase in phases:
for typ in load_types:
params['S'+phase+typ] = (ppci1["bus"][:, PD] +
ppci1["bus"][:, QD]*1j)*0
params['p'+phase+typ] = np.array([]) # p values from loads/sgens
params['q'+phase+typ] = np.array([]) # q values from loads/sgens
params['P'+phase+typ] = np.array([]) # Aggregated Active Power
params['Q'+phase+typ] = np.array([]) # Aggregated reactive Power
params['b'+phase+typ] = np.array([], dtype=int) # bus map for phases
params['b'+typ] = np.array([], dtype=int) # aggregated bus map(s_abc)
for element in load_elements:
_get_elements(params,net,element,phase,typ)
# Mapping constant power loads to buses
if params['b'+typ].size:
params['b'+phase+typ] = bus_lookup[params['b'+typ]]
params['b'+phase+typ], params['P'+phase+typ],\
params['Q'+phase+typ] = _sum_by_group(params['b'+phase+typ],
params['p'+phase+typ],
params['q'+phase+typ] * 1j)
params['S'+phase+typ][params['b'+phase+typ]] = \
(params['P'+phase+typ] + params['Q'+phase+typ])
Sabc_del = np.vstack((params['Sadelta'],params['Sbdelta'],params['Scdelta']))
Sabc_wye = np.vstack((params['Sawye'],params['Sbwye'],params['Scwye']))
    # last return variable reserved for constant impedance loads
return Sabc_del, Sabc_wye
# =============================================================================
# 3 phase algorithm function
# =============================================================================
def runpp_3ph(net, calculate_voltage_angles=True, init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model='t',
trafo_loading="current", enforce_q_lims=False, numba=True,
recycle=None, check_connectivity=True, switch_rx_ratio=2.0,
delta_q=0, v_debug=False, **kwargs):
"""
runpp_3ph: Performs Unbalanced/Asymmetric/Three Phase Load flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**algorithm** (str, "nr") - algorithm that is used to solve the power
flow problem.
The following algorithms are available:
- "nr" Newton-Raphson (pypower implementation with numba
accelerations)
Used only for positive sequence network
Zero and Negative sequence networks use Current Injection method
Vnew = Y.inv * Ispecified ( from s_abc/v_abc old)
Icalculated = Y * Vnew
**calculate_voltage_angles** (bool, "auto") - consider voltage angles
in loadflow calculation
If True, voltage angles of ext_grids and transformer shifts are
considered in the loadflow calculation. Considering the voltage
angles is only necessary in meshed networks that are usually
found in higher voltage levels. calculate_voltage_angles
in "auto" mode defaults to:
- True, if the network voltage level is above 70 kV
- False otherwise
The network voltage level is defined as the maximum rated voltage
of any bus in the network that is connected to a line.
**max_iteration** (int, "auto") - maximum number of iterations carried
out in the power flow algorithm.
In "auto" mode, the default value depends on the power flow solver:
- 10 for "nr"
            For three phase calculations, it is extended to 3 * max_iteration
**tolerance_mva** (float, 1e-8) - loadflow termination condition
referring to P / Q mismatch of node power in MVA
**trafo_model**
- transformer equivalent models
- "t" - transformer is modeled as equivalent with the T-model.
- "pi" - This is not recommended, since it is less exact than the T-model.
So, for three phase load flow, its not
implemented
**trafo_loading** (str, "current") - mode of calculation for
transformer loading
Transformer loading can be calculated relative to the rated
current or the rated power. In both cases the overall transformer
loading is defined as the maximum loading on the two sides of
the transformer.
- "current"- transformer loading is given as ratio of current
flow and rated current of the transformer. This is the recommended
setting, since thermal as well as magnetic effects in the
transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent
power flow to the rated apparent power of the transformer.
**enforce_q_lims** (bool, False)
(Not tested with 3 Phase load flow) - respect generator reactive power
limits
If True, the reactive power limits in net.gen.max_q_mvar/min_q_mvar
are respected in the loadflow. This is done by running a second
loadflow if reactive power limits are violated at any generator,
so that the runtime for the loadflow will increase if reactive
power has to be curtailed.
Note: enforce_q_lims only works if algorithm="nr"!
**check_connectivity** (bool, True) - Perform an extra connectivity
test after the conversion from pandapower to PYPOWER
If True, an extra connectivity test based on SciPy Compressed
            Sparse Graph Routines is performed. If the check finds unsupplied buses,
they are set out of service in the ppc
**voltage_depend_loads** (bool, True)
(Not tested with 3 Phase load flow) - consideration of
voltage-dependent loads. If False, net.load.const_z_percent and
net.load.const_i_percent are not considered, i.e. net.load.p_mw and
net.load.q_mvar are considered as constant-power loads.
**consider_line_temperature** (bool, False)
(Not tested with 3 Phase load flow) - adjustment of line
impedance based on provided line temperature. If True, net.line must
contain a column "temperature_degree_celsius". The temperature
dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**KWARGS:
**numba** (bool, True) - Activation of numba JIT compiler in the
newton solver
If set to True, the numba JIT compiler is used to generate
matrices for the powerflow, which leads to significant speed
improvements.
**switch_rx_ratio** (float, 2)
(Not tested with 3 Phase load flow) - rx_ratio of bus-bus-switches.
If impedance is zero, buses connected by a closed bus-bus switch
are fused to model an ideal bus. Otherwise, they are modelled
as branches with resistance defined as z_ohm column in switch
table and this parameter
**delta_q**
(Not tested with 3 Phase load flow) - Reactive power tolerance for option "enforce_q_lims"
in kvar - helps convergence in some cases.
**trafo3w_losses**
(Not tested with 3 Phase load flow) - defines where open loop losses of three-winding
transformers are considered. Valid options are "hv", "mv", "lv"
for HV/MV/LV side or "star" for the star point.
**v_debug** (bool, False)
(Not tested with 3 Phase load flow) - if True, voltage values in each
newton-raphson iteration are logged in the ppc
**init_vm_pu** (string/float/array/Series, None)
(Not tested with 3 Phase load flow) - Allows to define
initialization specifically for voltage magnitudes.
Only works with init == "auto"!
- "auto": all buses are initialized with the mean value of all
voltage controlled elements in the grid
- "flat" for flat start from 1.0
- "results": voltage magnitude vector is taken from result table
- a float with which all voltage magnitudes are initialized
- an iterable with a voltage magnitude value for each bus
(length and order has to match with the buses in net.bus)
- a pandas Series with a voltage magnitude value for each bus
(indexes have to match the indexes in net.bus)
**init_va_degree** (string/float/array/Series, None)
(Not tested with 3 Phase load flow)-
Allows to define initialization specifically for voltage angles.
Only works with init == "auto"!
- "auto": voltage angles are initialized from DC power flow
if angles are calculated or as 0 otherwise
- "dc": voltage angles are initialized from DC power flow
- "flat" for flat start from 0
- "results": voltage angle vector is taken from result table
- a float with which all voltage angles are initialized
- an iterable with a voltage angle value for each bus (length
and order has to match with the buses in net.bus)
- a pandas Series with a voltage angle value for each bus (indexes
have to match the indexes in net.bus)
**recycle** (dict, None)
(Not tested with 3 Phase load flow) - Reuse of internal powerflow variables for
time series calculation
Contains a dict with the following parameters:
_is_elements: If True in service elements are not filtered again
and are taken from the last result in net["_is_elements"]
ppc: If True the ppc is taken from net["_ppc"] and gets updated
instead of reconstructed entirely
Ybus: If True the admittance matrix (Ybus, Yf, Yt) is taken from
ppc["internal"] and not reconstructed
**neglect_open_switch_branches** (bool, False)
(Not tested with 3 Phase load flow) - If True no auxiliary
buses are created for branches when switches are opened at the branch.
Instead branches are set out of service
Return values:
---------------
**count** (int) - Number of iterations taken to reach convergence
**v_012_it** (complex ndarray) - Sequence voltages
**i012_it** (complex ndarray) - Sequence currents
See Also:
----------
pp.add_zero_impedance_parameters(net):
To add zero sequence parameters into network from the standard type
Examples:
----------
>>> from pandapower.pf.runpp_3ph import runpp_3ph
>>> runpp_3ph(net)
Notes:
--------
- Three phase load flow uses Sequence Frame for power flow solution.
- Three phase system is modelled with earth return.
- PH-E load type is called wye since Neutral and Earth are considered the same
- This solver has proved successful only for earthed transformers (i.e. Dyn, Yyn, YNyn & Yzn vector groups)
"""
# =============================================================================
# pandapower settings
# =============================================================================
overrule_options = {}
if "user_pf_options" in net.keys() and len(net.user_pf_options) > 0:
passed_parameters = _passed_runpp_parameters(locals())
overrule_options = {key: val for key, val in net.user_pf_options.items()
if key not in passed_parameters.keys()}
if numba:
numba = _check_if_numba_is_installed(numba)
ac = True
    mode = "pf_3ph"  # TODO: Make valid modes (pf, pf_3ph, se, etc.) available in a separate file (similar to idx_bus.py)
# v_debug = kwargs.get("v_debug", False)
copy_constraints_to_ppc = False
if trafo_model == 'pi':
        raise Not_implemented("Three phase power flow does not support the pi model "
                              "because of lack of accuracy")
# if calculate_voltage_angles == "auto":
# calculate_voltage_angles = False
# hv_buses = np.where(net.bus.vn_kv.values > 70)[0] # Todo: Where does that number come from?
# if len(hv_buses) > 0:
# line_buses = net.line[["from_bus", "to_bus"]].values.flatten()
# if len(set(net.bus.index[hv_buses]) & set(line_buses)) > 0:
# scipy spsolve options in NR power flow
use_umfpack = kwargs.get("use_umfpack", True)
permc_spec = kwargs.get("permc_spec", None)
calculate_voltage_angles = True
if init == "results" and len(net.res_bus) == 0:
init = "auto"
if init == "auto":
init = "dc" if calculate_voltage_angles else "flat"
default_max_iteration = {"nr": 10, "bfsw": 10, "gs": 10000, "fdxb": 30,
"fdbx": 30}
if max_iteration == "auto":
max_iteration = default_max_iteration["nr"]
neglect_open_switch_branches = kwargs.get("neglect_open_switch_branches", False)
only_v_results = kwargs.get("only_v_results", False)
if (recycle is not None and recycle is not False):
raise ValueError("recycle is only available with Balanced Load Flow ")
net._options = {}
_add_ppc_options(net, calculate_voltage_angles=calculate_voltage_angles,
trafo_model=trafo_model, check_connectivity=check_connectivity,
mode=mode, switch_rx_ratio=switch_rx_ratio,
init_vm_pu=init, init_va_degree=init,
enforce_q_lims=enforce_q_lims, recycle=recycle,
voltage_depend_loads=False, delta=delta_q,\
neglect_open_switch_branches=neglect_open_switch_branches
)
_add_pf_options(net, tolerance_mva=tolerance_mva, trafo_loading=trafo_loading,
numba=numba, ac=ac, algorithm="nr", max_iteration=max_iteration,\
only_v_results=only_v_results,v_debug=v_debug, use_umfpack=use_umfpack,
permc_spec=permc_spec)
net._options.update(overrule_options)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
init_results(net, "pf_3ph")
# =========================================================================
# pd2ppc conversion
# =========================================================================
net["_is_elements"] = None
_, ppci1 = _pd2ppc(net, 1)
_, ppci2 = _pd2ppc(net, 2)
gs_eg, bs_eg = _add_ext_grid_sc_impedance(net, ppci2)
_, ppci0 = _pd2ppc(net, 0)
_, bus0, gen0, branch0, _, _, _, _, _,\
v00, ref_gens = _get_pf_variables_from_ppci(ppci0)
base_mva, bus1, gen1, branch1, sl_bus, pv_bus, pq_bus, _, _, \
v01, ref_gens = _get_pf_variables_from_ppci(ppci1)
_, bus2, gen2, branch2, _, _, _, _, _, \
v02, ref_gens = _get_pf_variables_from_ppci(ppci2)
# =============================================================================
    # P Q values aggregated and summed up for each bus to make the s_abc matrix
# s_abc for wye connections ; s_abc_delta for delta connection
# =============================================================================
s_abc_delta, s_abc = _load_mapping(net, ppci1)
# =========================================================================
# Construct Sequence Frame Bus admittance matrices Ybus
# =========================================================================
ppci0, ppci1, ppci2, y_0_pu, y_1_pu, y_2_pu, y_0_f, y_1_f, y_2_f,\
y_0_t, y_1_t, y_2_t = _get_y_bus(ppci0, ppci1, ppci2, recycle)
# =========================================================================
# Initial voltage values
# =========================================================================
nb = ppci1["bus"].shape[0]
v_012_it = np.concatenate(
(
np.array(np.zeros((1, nb), dtype=np.complex128)),
np.array(np.ones((1, nb), dtype=np.complex128)),
np.array(np.zeros((1, nb), dtype=np.complex128))
),
axis=0)
# For Delta transformation:
# Voltage changed from line-earth to line-line using V_T
# s_abc/v_abc will now give line-line currents. This is converted to line-earth
# current using I-T
v_del_xfmn = np.array([[1, -1, 0],
[0, 1, -1],
[-1, 0, 1]])
i_del_xfmn = np.array([[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]])
v_abc_it = sequence_to_phase(v_012_it)
# =========================================================================
# Iteration using Power mismatch criterion
# =========================================================================
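    # Convergence threshold on the outer sequence-frame power mismatch; the
    # inner Newton-Raphson positive-sequence solve uses tolerance_mva instead.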
outer_tolerance_mva = 3e-8
count = 0
s_mismatch = np.array([[True], [True]], dtype=bool)
t0 = time()
while (s_mismatch > outer_tolerance_mva).any() and count < 30*max_iteration:
# =====================================================================
# Voltages and Current transformation for PQ and Slack bus
# =====================================================================
s_abc_pu = -np.divide(s_abc, ppci1["baseMVA"])
s_abc_delta_pu = -np.divide(s_abc_delta, ppci1["baseMVA"])
i_abc_it_wye = (np.divide(s_abc_pu, v_abc_it)).conjugate()
i_abc_it_delta = np.matmul(i_del_xfmn, (np.divide(s_abc_delta_pu, np.matmul
(v_del_xfmn, v_abc_it))).conjugate())
        # For buses with both delta and wye loads we need to sum their currents
i_abc_it = i_abc_it_wye + i_abc_it_delta
i012_it = phase_to_sequence(i_abc_it)
v1_for_s1 = v_012_it[1, :]
i1_for_s1 = -i012_it[1, :]
v0_pu_it = X012_to_X0(v_012_it)
v2_pu_it = X012_to_X2(v_012_it)
i0_pu_it = X012_to_X0(i012_it)
i2_pu_it = X012_to_X2(i012_it)
s1 = np.multiply(v1_for_s1, i1_for_s1.conjugate())
# =============================================================================
# Current used to find S1 Positive sequence power
# =============================================================================
ppci1["bus"][pq_bus, PD] = np.real(s1[pq_bus]) * ppci1["baseMVA"]
ppci1["bus"][pq_bus, QD] = | np.imag(s1[pq_bus]) | numpy.imag |
import numpy as np
import scipy.linalg
class KalmanFilter(object):
def __init__(self):
ndim, dt = 2, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
self._std_position = 0.200
self._std_velocity = 0.0350
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
world coordinates of a box (x,y)
Returns
-------
(ndarray, ndarray)
Returns the mean vector (4 dimensional) and covariance matrix (4x4
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
self._std_position,
self._std_position,
self._std_velocity,
self._std_velocity
]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 4 dimensional mean vector of the object state at the previous time step.
covariance : ndarray
The 4x4 dimensional covariance matrix of the object state at the previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_position,
self._std_position
]
std_vel = [
self._std_velocity,
self._std_velocity
]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(self._motion_mat, mean)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (4 dimensional array).
covariance : ndarray
The state's covariance matrix (4x4 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_position,
self._std_position
]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((
self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def update(self, mean, covariance, measurement):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (4 dimensional).
covariance : ndarray
The state's covariance matrix (4x4 dimensional).
measurement : ndarray
world coordinates of a box (x, y)
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
            check_finite=False).T
        innovation = measurement - projected_mean
        # Standard Kalman correction step.
        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((
            kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance
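

# A minimal usage sketch of the filter defined above; the measurement values
# are illustrative only and not taken from any original example.
if __name__ == "__main__":
    kf = KalmanFilter()
    mean, covariance = kf.initiate(np.array([0.0, 0.0]))
    mean, covariance = kf.predict(mean, covariance)
    mean, covariance = kf.update(mean, covariance, np.array([0.1, -0.2]))
    print(mean[:2])  # corrected (x, y) position estimate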
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["TimeSeries"]
import numpy as np
try:
    from itertools import izip
except ImportError:  # Python 3: izip was removed; the built-in zip is lazy.
    izip = zip
from .frame import Frame
from ._kpsf import solve, N_INT_TIME
class TimeSeries(object):
def __init__(self, time, flux_images, ferr_images, quality, **kwargs):
# Initialize the frame images.
self.time = time
self.frames = []
for i, (f, fe) in enumerate(izip(flux_images, ferr_images)):
frame = []
if quality[i] == 0:
frame = Frame(f, fe, **kwargs)
if not np.any(frame.mask):
frame = []
self.frames.append(frame)
# Save the frame shape.
self.shape = self.frames[0].shape
if any(f.shape != self.shape for f in self.frames if len(f)):
raise ValueError("Frame shape mismatch")
# Update the frames to have a coherent time series.
self.initialize()
def initialize(self):
# Traverse the graph and construct the (greedy) best path.
ns = min(map(len, filter(len, self.frames)))
metric = np.array([1.0, 1.0, 1e-8])
current = None
for t, node in enumerate(self.frames):
if not len(node):
continue
if current is None:
current = node.coords[:ns]
node.coords = current
continue
# Compute the set of distances between this node and the current
# position.
r = sum([(node.coords[k][:, None] - current[k][None, :]) ** 2
* metric[i] for i, k in enumerate(("x", "y", "flux"))])
r0 = np.array(r)
# Loop over the permutations and greedily choose the best update.
rows, cols = np.arange(r.shape[0]), np.arange(r.shape[1])
update = np.nan + np.zeros(ns)
while any(np.isnan(update)):
row, col = np.unravel_index(np.argmin(r), r.shape)
update[cols[col]] = rows[row]
r = np.delete(r, row, axis=0)
r = np.delete(r, col, axis=1)
rows = np.delete(rows, row)
cols = np.delete(cols, col)
update = np.array(update, dtype=int)
# Compute the total cost. MAGIC
cost = np.sum(r0[(update, range(ns))])
if cost > 10.0:
node.coords = None
continue
# Update the current locations.
current = np.array([node.coords[j] for j in update])
self.frames[t].coords = current
# Approximate the frame motion as the motion of the brightest star.
self.origin = np.nan + np.zeros((len(self.frames), N_INT_TIME, 2))
for t, node in enumerate(self.frames):
if not len(node):
continue
self.origin[t, None, :] = node.coords["x"][0], node.coords["y"][0]
# Find the list of times that were acceptable.
self.good_times = np.all(np.isfinite(self.origin), axis=(1, 2))
# Center the motion and compute the mean offsets.
self.origin[self.good_times] -= np.mean(self.origin[self.good_times],
axis=0)
self.origin[np.isnan(self.origin)] = 0.0
        self.offsets = np.zeros((ns, 2))
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.vgg import vgg19
from models.backbones.residualconv import Residual_Conv
from models.backbones.resnet import ResNet_Backbone
from models.base_net import Base_of_Network
from models.decoders.upsample import UpSample_Layers
from models.decoders.upsample_v2 import UpSample_Layers_v2
from utils import platform_manager
@platform_manager.MODELS.add_module
class FAL_NetB(Base_of_Network):
def _initialize_model(
self,
out_num=49,
encoder='FALB',
decoder='Upv2',
num_ch_dec = [64, 128, 256, 256, 256],
out_scales = [0],
min_disp=2,
max_disp=300,
image_size=[192, 640],
mom_module=False,
raw_fal_arch=False,
):
self.init_opts = locals()
self.out_num = out_num
self.encoder = encoder
self.decoder = decoder
self.num_ch_dec = num_ch_dec
self.out_scales = out_scales
self.min_disp = min_disp
self.max_disp = max_disp
self.image_size = image_size
self.mom_module = mom_module
self.raw_fal_arch = raw_fal_arch
# Initialize architecture
self.convblocks = {}
if self.encoder == 'FALB':
num_ch_enc = [32, 64, 128, 256, 256, 256, 512]
self.convblocks['enc'] = Residual_Conv(num_ch_enc=num_ch_enc,
input_flow=self.raw_fal_arch)
elif 'Res' in self.encoder:
encoder_layer = int(self.encoder[3:])
self.convblocks['enc'] = ResNet_Backbone(encoder_layer)
if encoder_layer <= 34:
num_ch_enc = [64, 64, 128, 256, 512]
else:
num_ch_enc = [64, 256, 512, 1024, 2048]
if self.decoder == 'Upv2':
assert self.encoder == 'FALB',\
'Upv2 decoder must be used with FALB encoder'
self.convblocks['dec'] = UpSample_Layers_v2(
num_ch_enc,
self.num_ch_dec,
output_ch=out_num,
raw_fal_arch=self.raw_fal_arch)
elif self.decoder == 'Upv1':
# When use Upv1 with FALB, please set
# num_ch_dec = [64, 64, 128, 256, 256, 256]
self.convblocks['dec'] = UpSample_Layers(num_ch_enc,
self.num_ch_dec,
output_ch=out_num,
out_scales=[0])
self._networks = nn.ModuleList(list(self.convblocks.values()))
# Initialize transformer
disp_range = []
rel_disp = self.max_disp / self.min_disp
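        # Candidate disparities are spaced geometrically between min_disp and max_disp.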
for disp_idx in range(self.out_num):
index_num = rel_disp**(disp_idx / (self.out_num - 1) - 1)
disp = self.max_disp * index_num
disp_range.append(disp)
volume_dk = torch.tensor(disp_range).unsqueeze(1).unsqueeze(1)
volume_dk = volume_dk.unsqueeze(0).unsqueeze(0)
self.volume_dk = volume_dk.to(self.device)
# side(s) transformer means warped to LEFT
# other-side(o) transformer means warped to RIGHT
self.transformer = {}
self.transformer['o'] = DispTransformer(self.image_size, disp_range,
self.device)
self.transformer['s'] = DispTransformer(self.image_size,
[-disp for disp in disp_range],
self.device)
self.feat_net = []
# load pretrained vgg19 module
vgg = vgg19(pretrained=True, progress=False).features.to(self.device)
vgg_feats = list(vgg.modules())
vgg_layer_num = [5, 5, 9]
read_module_num = 0
for module_num in vgg_layer_num:
self.feat_net.append(nn.Sequential())
for _ in range(module_num):
self.feat_net[-1].add_module(str(read_module_num),
vgg_feats[read_module_num + 1])
read_module_num += 1
self.train_sides = ['s']
if self.mom_module:
self.train_sides.append('o')
self.fix_network = {}
self.fix_network['enc'] = Residual_Conv(num_ch_enc=num_ch_enc,
input_flow=self.raw_fal_arch).to(self.device)
self.fix_network['dec'] = UpSample_Layers_v2(num_ch_enc,
num_ch_dec, output_ch=out_num,
raw_fal_arch=self.raw_fal_arch).to(self.device)
self.loaded_flag = False
else:
self.loaded_flag = True
def forward(self, inputs, is_train=True):
self.inputs = inputs
outputs = {}
if is_train:
losses = {}
losses['loss'] = 0
if not self.loaded_flag:
self.fix_network['enc'].load_state_dict(
self.convblocks['enc'].state_dict().copy())
self.fix_network['dec'].load_state_dict(
self.convblocks['dec'].state_dict().copy())
self.loaded_flag = True
directs = self.inputs['direct']
directs = directs.unsqueeze(1).unsqueeze(1).unsqueeze(1)
for train_side in self.train_sides:
# oside is used to select the disparity transformer
oside = 'o' if train_side == 's' else 's'
# process inputs
t_img_aug = self.inputs['color_{}_aug'.format(train_side)]
# generate the train side disparity
raw_volume = self._get_dispvolume_from_img(train_side)
d_volume = raw_volume.unsqueeze(1)
p_volume = self._get_probvolume_from_dispvolume(d_volume)
pred_disp = self._get_disp_from_probvolume(p_volume, directs)
w_d_volume = self.transformer[oside]\
.get_warped_volume(d_volume, directs)
w_p_volume = self._get_probvolume_from_dispvolume(w_d_volume)
                # build the outputs
outputs['disp_{}'.format(train_side)] = pred_disp
outputs['p_volume_{}'.format(train_side)] = p_volume.detach()
outputs['w_p_volume_{}'.format(
train_side)] = w_p_volume.detach()
if self.mom_module:
outputs['depth_{}'.format(
train_side)] = self._get_depth_from_disp(pred_disp)
with torch.no_grad():
if train_side == 's':
t_img_aug_f = torch.flip(t_img_aug, [3])
else:
t_img_aug_f = t_img_aug
if self.raw_fal_arch:
B, C, H, W = t_img_aug_f.shape
flow = torch.ones(B, 1, H, W).type(t_img_aug_f.type())
flow[:, 0, :, :] = self.max_disp * flow[:, 0, :, :] / 100
x_f = t_img_aug_f
else:
x_f = t_img_aug_f / 0.225
flow = None
features_f = self.fix_network['enc'](x_f, flow)
raw_volume_f = self.fix_network['dec'](
features_f, t_img_aug_f.shape)
d_volume_f = raw_volume_f.unsqueeze(1)
p_volume_f = self._get_probvolume_from_dispvolume(
d_volume_f)
pred_disp_f = self._get_disp_from_probvolume(
p_volume_f, directs)
pred_depth_f = self._get_depth_from_disp(pred_disp_f)
if train_side == 's':
pred_depth_ff = torch.flip(pred_depth_f, [3])
else:
pred_depth_ff = pred_depth_f
outputs['disp_f_{}'.format(train_side)] = pred_disp_f
outputs['depth_ff_{}'.format(
train_side)] = pred_depth_ff
mask = torch.ones_like(pred_depth_ff)
mask = mask / pred_depth_ff.max()
outputs['ff_mask_{}'.format(oside)] = mask
                # generate the synthetic image of the other side; it is named
                # by the train side, but it synthesizes the other side's image
w_img = self.transformer[oside]\
.get_warped_frame(t_img_aug, directs)
synth_img = (w_p_volume * w_img).sum(dim=2)
outputs['synth_img_{}'.format(train_side)] = synth_img
# compute the occlusion mask
if self.mom_module:
for train_side in self.train_sides:
oside = 'o' if train_side == 's' else 's'
p_volume = outputs['p_volume_{}'.format(train_side)]
cyc_p_volume = self.transformer[oside]\
.get_warped_volume(p_volume, directs)
occ_mask1 = cyc_p_volume.sum(dim=2)
w_p_volume = outputs['w_p_volume_{}'.format(oside)]
cyc_w_p_volume = self.transformer[oside]\
.get_warped_volume(w_p_volume, directs)
occ_mask2 = cyc_w_p_volume.sum(dim=2)
occ_mask = (occ_mask1 * occ_mask2).clamp(0, 1)
outputs['mask_{}'.format(train_side)] = occ_mask
outputs['inv_mask_{}'.format(train_side)] = (1 - occ_mask)
outputs['ff_mask_{}'.format(oside)] = (
1 -
occ_mask) * outputs['ff_mask_{}'.format(train_side)]
# extract features by vgg
for train_side in self.train_sides:
oside = 'o' if train_side == 's' else 's'
raw_img = self.inputs['color_{}_aug'.format(oside)]
synth_img = outputs['synth_img_{}'.format(train_side)]
if self.mom_module:
occ_mask = outputs['mask_{}'.format(train_side)]
inv_occ_mask = outputs['inv_mask_{}'.format(train_side)]
synth_img = synth_img * occ_mask + raw_img * inv_occ_mask
outputs['synth_img_{}'.format(train_side)] = synth_img
with torch.no_grad():
raw_feats = self._get_conv_feats_from_image(raw_img)
synth_feats = self._get_conv_feats_from_image(synth_img)
for feat_idx in range(3):
rawf_name = 'raw_feats_{}_{}'.format(feat_idx, train_side)
outputs[rawf_name] = raw_feats[feat_idx]
synthf_name = 'synth_feats_{}_{}'.format(
feat_idx, train_side)
outputs[synthf_name] = synth_feats[feat_idx]
# compute the losses
for train_side in self.train_sides:
self._compute_losses(outputs,
train_side,
losses,
add_loss=False)
self._add_final_losses(train_side, losses)
return outputs, losses
else:
raw_volume = self._get_dispvolume_from_img('s', aug='')
d_volume = raw_volume.unsqueeze(1)
p_volume = self._get_probvolume_from_dispvolume(d_volume)
pred_disp = self._get_disp_from_probvolume(p_volume)
pred_depth = self._get_depth_from_disp(pred_disp)
outputs[('depth', 's')] = pred_depth
return outputs
def _get_dispvolume_from_img(self, side, aug='_aug'):
input_img = self.inputs['color_{}{}'.format(side, aug)].clone()
if side == 'o':
input_img = torch.flip(input_img, dims=[3])
if self.raw_fal_arch:
B, C, H, W = input_img.shape
flow = torch.ones(B, 1, H, W).type(input_img.type())
flow[:, 0, :, :] = self.max_disp * flow[:, 0, :, :] / 100
x = input_img
features = self.convblocks['enc'](x, flow)
else:
x = input_img
features = self.convblocks['enc'](x)
out_volume = self.convblocks['dec'](features, input_img.shape)
if side == 'o':
out_volume = torch.flip(out_volume, dims=[3])
return out_volume
def _upsample(self, x, shape):
return F.interpolate(x, size=shape[2:], mode='nearest')
def _get_probvolume_from_dispvolume(self, volume):
return F.softmax(volume, dim=2)
def _get_disp_from_probvolume(self, volume, directs=None):
disp = (volume * self.volume_dk).sum(dim=2)
if directs is not None:
disp = disp * torch.abs(directs)
return disp
def _get_mask_from_probvolume(self, volume):
raw_mask = volume.sum(dim=2)
return raw_mask.clamp(0, 1)
def _get_depth_from_disp(self, disp):
if 'disp_k' in self.inputs:
k = self.inputs['disp_k'].unsqueeze(1).unsqueeze(1).unsqueeze(1)
else:
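            # Fallback stereo scale: the KITTI focal length (~721.54 px) times
            # the 0.54 m stereo baseline.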
k = torch.tensor([721.54 * 0.54], dtype=torch.float)
k = k.unsqueeze(1).unsqueeze(1).unsqueeze(1).to(disp)
depth = torch.abs(k) / disp
return depth
def _get_conv_feats_from_image(self, raw_img):
feats = []
x = raw_img
for block_idx in range(len(self.feat_net)):
x = self.feat_net[block_idx](x)
feats.append(x)
return feats
class Conv3x3(nn.Module):
"""Layer to pad and convolve input from
https://github.com/nianticlabs/monodepth2."""
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class ConvBlock(nn.Module):
"""Layer to perform a convolution followed by ELU from
https://github.com/nianticlabs/monodepth2."""
def __init__(self, in_channels, out_channels, bn=False, nonlin=True):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
if nonlin:
self.nonlin = nn.ELU(inplace=True)
else:
self.nonlin = None
if bn:
self.bn = nn.BatchNorm2d(out_channels)
else:
self.bn = None
def forward(self, x):
out = self.conv(x)
if self.bn is not None:
out = self.bn(out)
if self.nonlin is not None:
out = self.nonlin(out)
return out
class AtrousConv(nn.Sequential):
def __init__(self,
in_channels,
out_channels,
dilation,
apply_bn_first=True):
super().__init__()
self.atrous_conv = torch.nn.Sequential()
if apply_bn_first:
self.atrous_conv.add_module(
'first_bn',
nn.BatchNorm2d(in_channels,
momentum=0.01,
affine=True,
track_running_stats=True,
eps=1.1e-5))
self.atrous_conv.add_module(
'aconv_sequence',
nn.Sequential(
nn.ReLU(),
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels * 2,
bias=False,
kernel_size=1,
stride=1,
padding=0),
nn.BatchNorm2d(out_channels * 2,
momentum=0.01,
affine=True,
track_running_stats=True), nn.ReLU(),
nn.Conv2d(in_channels=out_channels * 2,
out_channels=out_channels,
bias=False,
kernel_size=3,
stride=1,
padding=(dilation, dilation),
dilation=dilation)))
def forward(self, x):
return self.atrous_conv.forward(x)
class DispTransformer(object):
def __init__(self, image_size, disp_range, device='cuda'):
i_tetha = torch.zeros(1, 2, 3)
i_tetha[:, 0, 0] = 1
i_tetha[:, 1, 1] = 1
normal_coord = F.affine_grid(i_tetha,
[1, 1, image_size[0], image_size[1]],
align_corners=True)
self.base_coord = normal_coord.to(device)
# add disparity
self.normal_disp_bunch = []
zeros = torch.zeros_like(self.base_coord)
for disp in disp_range:
disp_map = zeros.clone()
disp_map[..., 0] = disp_map[..., 0] + disp
normal_disp_map = self._get_normalize_coord(disp_map, image_size)
normal_disp_map = normal_disp_map.to(torch.device(device))
self.normal_disp_bunch.append(normal_disp_map)
self.ch_num = len(disp_range)
def _get_normalize_coord(self, coord, image_size):
"""TODO."""
coord[..., 0] /= (image_size[1] / 2)
coord[..., 1] /= (image_size[0] / 2)
return coord
def get_warped_volume(self, volume, directs):
"""Warp the volume by disparity range with zeros padding."""
bs = volume.shape[0]
new_volume = torch.zeros_like(volume)
for ch_idx in range(self.ch_num):
normal_disp = self.normal_disp_bunch[ch_idx].repeat(bs, 1, 1, 1)
# To adapt flip data augment
grid_coord = normal_disp * directs + self.base_coord
warped_frame = F.grid_sample(volume[:, :, ch_idx, ...],
grid_coord,
mode='bilinear',
padding_mode='zeros',
align_corners=True)
new_volume[:, :, ch_idx, ...] = warped_frame
return new_volume
def get_warped_frame(self, x, directs, base_coord=None, coords_k=None):
"""Warp the images by disparity range with border padding."""
if base_coord is None:
base_coord = self.base_coord
bs, ch, h, w = x.shape
else:
bs, h, w, _ = base_coord.shape
ch = x.shape[1]
directs *= coords_k
frame_volume = torch.zeros((bs, ch, self.ch_num, h, w)).to(x)
for ch_idx in range(self.ch_num):
normal_disp = self.normal_disp_bunch[ch_idx].repeat(bs, 1, 1, 1)
# To adapt flip data augment
grid_coord = normal_disp * directs + base_coord
warped_frame = F.grid_sample(x,
grid_coord,
mode='bilinear',
padding_mode='border',
align_corners=True)
frame_volume[:, :, ch_idx, ...] = warped_frame
return frame_volume
class DepthProjecter(nn.Module):
def __init__(self, image_size):
super().__init__()
self.height = image_size[0]
self.width = image_size[1]
meshgrid = np.meshgrid(range(self.width),
range(self.height),
indexing='xy')
        self.id_coords = np.stack(meshgrid, axis=0)
# -*- coding: utf-8 -*-
import numpy
class Dummy():
def __init__(self):
'''nothing to do here'''
def calc_pm1(self, exp_vars):
return self.__sum_exp_vars(exp_vars)
def calc_pm2(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 2
def calc_pm10(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 10
def calc_pm100(self, exp_vars):
return self.__sum_exp_vars(exp_vars) * 100
def __sum_exp_vars(self,ev):
return ev['exp_var 1'] + ev['exp_var 2']
def __call__(self, **kwargs):
return dict(
pm_1=self.calc_pm1(kwargs),
pm_2=self.calc_pm2(kwargs),
pm_10=self.calc_pm10(kwargs),
pm_100=self.calc_pm100(kwargs),
)
def NoisyDummy(**kwargs):
lever1 = kwargs.get('lever1', 0)
lever2 = kwargs.get('lever2', 0)
uncertain1 = kwargs.get('uncertain1', 3)
uncertain2 = numpy.exp(kwargs.get('uncertain2', -0.7))
uncertain3 = numpy.exp(kwargs.get('uncertain3', 0.7))
certain4 = kwargs.get('certain4', 3)
noise_amplitude = kwargs.get('noise_amplitude', 2.0)
noise_frequency = kwargs.get('noise_frequency', 5.0)
pm_1 = (
- uncertain2 * lever1 * lever1
+ (uncertain1 + certain4) * (lever1 + lever2)
+ noise_amplitude * numpy.sin(noise_frequency * lever1)
)
pm_2 = numpy.minimum(
1.11e+111 * uncertain1,
numpy.exp(
uncertain3 * lever1 * (lever1 + lever2)
+ uncertain1 * lever1
+ noise_amplitude * numpy.cos(noise_frequency * lever2)
)
)
pm_3 = (
noise_amplitude * numpy.cos(noise_frequency * lever1)
+ noise_amplitude * numpy.sin(noise_frequency * lever2)
+ certain4
)
pm_4 = numpy.exp(
uncertain1 + certain4
)
return {'pm_1':pm_1, 'pm_2': pm_2, 'pm_3': pm_3, 'pm_4':pm_4}
def Road_Capacity_Investment(
# constant
free_flow_time=60,
initial_capacity=100,
# uncertainty
alpha=0.15,
beta=4.0,
input_flow=100,
value_of_time=0.01,
unit_cost_expansion=1,
interest_rate=0.03,
yield_curve=0.01,
# policy
expand_capacity=10,
amortization_period=30,
interest_rate_lock=False,
debt_type='GO Bond',
lane_width=10,
**kwargs,
):
"""
A fictitious example model for road capacity investment.
This model simulates a capacity expansion investment on a single
network link. The link volume-delay function is governed by the
`BPR function <https://en.wikipedia.org/wiki/Route_assignment#Frank-Wolfe_algorithm>`_.
This model is a bit contrived, because it is designed to explicitly demonstrate
a wide variety of EMAT features in a transportation planning model that is as simple
as possible. For example, the policy levers are structured so that there is one
of each dtype (float, int, bool, and categorical).
Args:
free_flow_time (float, default 60): The free flow travel time on the link.
initial_capacity (float, default 100): The pre-expansion capacity on the link.
alpha (float, default 0.15): Alpha parameter to the BPR volume-delay function.
beta (float, default 4.0): Beta parameter to the BPR volume-delay function.
input_flow (float, default 100): The future input flow on the link.
value_of_time (float, default 0.01): The value of a unit of travel time savings
per unit of flow on the link.
unit_cost_expansion (float, default 1): The present marginal cost of adding one
unit of capacity to the link (assumes no economies of scale on expansion cost)
interest_rate (float, default 0.03): The interest rate actually incurred for
revenue bonds amortized over 15 years. The interest rate for general obligation
bonds is assumed to be 0.0025 less than this value.
yield_curve (float, default 0.01): The marginal increase in the interest_rate if
the amortization period is 50 years instead of 15. The yield curve is assumed
to be linearly projected to all other possible amortization periods
expand_capacity (float, default 10): The amount of capacity expansion actually
constructed.
amortization_period (int, default 30): The time period over which the construction
costs are amortized.
interest_rate_lock (bool, default False): Whether interest rates are locked at
the assumed current rate of 0.03 / 0.01 or allowed to float.
debt_type ('GO Bond', 'Rev Bond', 'Paygo'): Type of financing. General obligation
bonds are assumed to have a lower interest rate than revenue bonds, but
may be politically less desirable. Pay-as-you-go financing incurs no actual
interest costs, but requires actually having the funds available.
lane_width (float, default 10): The width of lanes on the roadway. This parameter
is intentionally wacky, causing massive congestion for any value other than 10,
to demonstrate what might happen with broken model inputs.
Returns:
dict:
no_build_travel_time
The average travel time on the link if no
capacity expansion was constructed.
build_travel_time
The average travel time on the link after expansion.
time_savings
The average travel time savings as a result of the
expansion.
value_of_time_savings
The total value of the travel time savings,
accounting for the time savings per traveler, the total flow, and
the value of time.
present_cost_expansion
The present cost of building the expansion
cost_of_capacity_expansion
The annual payment to finance the expansion,
when amortized.
net_benefits
The value of the time savings minus the annual payment.
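
    Examples:
        A smoke test under all-default inputs; with the BPR parameters above
        the no-build travel time is 69 and the build travel time is roughly
        66.147, so:
        >>> out = Road_Capacity_Investment()
        >>> print(round(float(out['time_savings']), 3))
        2.853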
"""
debt_type = debt_type.lower()
assert debt_type in ('go bond', 'paygo', 'rev bond')
average_travel_time0 = free_flow_time * (1 + alpha*(input_flow/initial_capacity)**beta)
capacity = initial_capacity + expand_capacity
average_travel_time1 = free_flow_time * (1 + alpha*(input_flow/capacity)**beta)
average_travel_time1 += (numpy.absolute(lane_width-10)*1000)**0.5
travel_time_savings = average_travel_time0 - average_travel_time1
value_of_time_savings = value_of_time * travel_time_savings * input_flow
present_cost_of_capacity_expansion = float(unit_cost_expansion * expand_capacity)
if interest_rate_lock:
interest_rate = 0.03
yield_curve = 0.01
if (debt_type == 'go bond'):
interest_rate -= 0.0025
elif (debt_type == 'paygo'):
interest_rate = 0
effective_interest_rate = interest_rate + yield_curve * (amortization_period-15) / 35
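    # Note: numpy.pmt was deprecated in NumPy 1.18 and removed in 1.20; newer
    # environments need numpy_financial.pmt instead.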
cost_of_capacity_expansion = numpy.pmt(effective_interest_rate,
amortization_period,
present_cost_of_capacity_expansion, )
return dict(
no_build_travel_time=average_travel_time0,
build_travel_time=average_travel_time1,
time_savings=travel_time_savings,
value_of_time_savings=value_of_time_savings,
present_cost_expansion=present_cost_of_capacity_expansion,
cost_of_capacity_expansion=-cost_of_capacity_expansion,
net_benefits = value_of_time_savings + cost_of_capacity_expansion,
)
def _Road_Capacity_Investment_CmdLine():
"""
This is a demo for calling a core model function on the command line.
"""
import argparse, pandas, os, numpy, sys, warnings
parser = argparse.ArgumentParser()
parser.add_argument('--levers', type=str, default='levers.yml', help='Levers Yaml File')
parser.add_argument('--uncs', type=str, default="uncs.yml", help='Uncertainties Yaml File')
parser.add_argument('--no-random-crashes', action='store_true', help='disable random crashes')
args = parser.parse_args()
import logging
logger = logging.getLogger('emat.RoadTest')
file_handler = logging.FileHandler("emat-road-test.log")
file_handler.setLevel(10)
LOG_FORMAT = '[%(asctime)s] %(name)s.%(levelname)s: %(message)s'
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(20)
console_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(10)
logger.info("running emat-road-test-demo")
logger.debug(str(args))
logger.debug(str(os.getcwd()))
import yaml
if os.path.exists(args.levers):
with open(args.levers, 'rt') as f:
levers = yaml.safe_load(f)
else:
levers = {'mandatory_unused_lever':42}
if os.path.exists(args.uncs):
with open(args.uncs, 'rt') as f:
uncs = yaml.safe_load(f)
else:
uncs = {}
if 'mandatory_unused_lever' not in levers:
raise ValueError("missing 'mandatory_unused_lever'")
if levers['mandatory_unused_lever'] != 42:
raise ValueError("incorrect value for 'mandatory_unused_lever', must be 42")
if 'unit_cost_expansion' in uncs:
raise ValueError("cannot give 'unit_cost_expansion', use 'labor_unit_cost_expansion' and 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) <= uncs.get('materials_unit_cost_expansion', 0):
raise ValueError("'labor_unit_cost_expansion' cannot be less than or equal 'materials_unit_cost_expansion'")
if uncs.get('labor_unit_cost_expansion', 0) > uncs.get('materials_unit_cost_expansion', 0)*2:
raise ValueError("'labor_unit_cost_expansion' cannot be more than double 'materials_unit_cost_expansion'")
unit_cost_expansion = uncs.pop('labor_unit_cost_expansion', 0) + uncs.pop('materials_unit_cost_expansion', 0)
uncs['unit_cost_expansion'] = unit_cost_expansion
# (pseudo)random crash
if not args.no_random_crashes:
if 'expand_capacity' in levers and levers['expand_capacity'] > 90 and not os.path.exists('prevent_random_crash.txt'):
with open('prevent_random_crash.txt', 'wt') as f:
f.write("this file will prevent random crashes in `emat-road-test-demo`")
logger.error("Random crash, ha ha!")
sys.exit(-9)
try:
for k,v in levers.items():
logger.debug(f"lever: {k} = {v}")
for k,v in uncs.items():
logger.debug(f"uncertainty: {k} = {v}")
result = Road_Capacity_Investment(**levers, **uncs)
for k,v in result.items():
logger.debug(f"result: {k} = {v}")
result1 = {str(k):float(result[k]) for k in ['no_build_travel_time','build_travel_time','time_savings']}
result2 = pandas.DataFrame({
        'value_of_time_savings': [numpy.exp(result['value_of_time_savings'] / 1000)],
    })
import numpy as np
import pandas as pd
from numpy.testing import assert_, assert_equal, assert_allclose, assert_raises
from statsmodels.tsa.arima import specification, params
def test_init():
# Test initialization of the params
# Basic test, with 1 of each parameter
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
# Test things copied over from spec
assert_equal(p.spec, spec)
assert_equal(p.exog_names, ['a'])
assert_equal(p.ar_names, ['ar.L1'])
assert_equal(p.ma_names, ['ma.L1'])
assert_equal(p.seasonal_ar_names, ['ar.S.L4'])
assert_equal(p.seasonal_ma_names, ['ma.S.L4'])
assert_equal(p.param_names, ['a', 'ar.L1', 'ma.L1', 'ar.S.L4', 'ma.S.L4',
'sigma2'])
assert_equal(p.k_exog_params, 1)
assert_equal(p.k_ar_params, 1)
assert_equal(p.k_ma_params, 1)
assert_equal(p.k_seasonal_ar_params, 1)
assert_equal(p.k_seasonal_ma_params, 1)
assert_equal(p.k_params, 6)
# Initial parameters should all be NaN
assert_equal(p.params, np.nan)
assert_equal(p.ar_params, [np.nan])
assert_equal(p.ma_params, [np.nan])
assert_equal(p.seasonal_ar_params, [np.nan])
assert_equal(p.seasonal_ma_params, [np.nan])
assert_equal(p.sigma2, np.nan)
assert_equal(p.ar_poly.coef, np.r_[1, np.nan])
assert_equal(p.ma_poly.coef, np.r_[1, np.nan])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, np.nan])
assert_equal(p.reduced_ar_poly.coef, np.r_[1, [np.nan] * 5])
assert_equal(p.reduced_ma_poly.coef, np.r_[1, [np.nan] * 5])
# Test other properties, methods
assert_(not p.is_complete)
assert_(not p.is_valid)
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
desired = {
'exog_params': [np.nan],
'ar_params': [np.nan],
'ma_params': [np.nan],
'seasonal_ar_params': [np.nan],
'seasonal_ma_params': [np.nan],
'sigma2': np.nan}
assert_equal(p.to_dict(), desired)
desired = pd.Series([np.nan] * spec.k_params, index=spec.param_names)
assert_allclose(p.to_pandas(), desired)
# Test with different numbers of parameters for each
exog = pd.DataFrame([[0, 0]], columns=['a', 'b'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(3, 1, 2), seasonal_order=(5, 1, 6, 4))
p = params.SARIMAXParams(spec=spec)
# No real need to test names here, since they are already tested above for
# the 1-param case, and tested more extensively in test for
# SARIMAXSpecification
assert_equal(p.k_exog_params, 2)
assert_equal(p.k_ar_params, 3)
assert_equal(p.k_ma_params, 2)
assert_equal(p.k_seasonal_ar_params, 5)
assert_equal(p.k_seasonal_ma_params, 6)
assert_equal(p.k_params, 2 + 3 + 2 + 5 + 6 + 1)
def test_set_params_single():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
# Here each type has only a single parameters
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=(1, 1, 1), seasonal_order=(1, 1, 1, 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
assert_equal(p.seasonal_ar_params, [-3])
assert_equal(p.seasonal_ma_params, [-2])
assert_equal(p.sigma2, -1.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, 5])
assert_equal(p.ma_poly.coef, np.r_[1, -4])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, 3])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, -2])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, 5, 0, 0, 3, 15])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, -4, 0, 0, -2, 8])
# Override again, one at a time, now using lists
p.exog_params = [1.]
p.ar_params = [2.]
p.ma_params = [3.]
p.seasonal_ar_params = [4.]
p.seasonal_ma_params = [5.]
p.sigma2 = [6.]
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Override again, one at a time, now using arrays
p.exog_params = np.array(6.)
p.ar_params = np.array(5.)
p.ma_params = np.array(4.)
p.seasonal_ar_params = np.array(3.)
p.seasonal_ma_params = np.array(2.)
p.sigma2 = np.array(1.)
assert_equal(p.params, [6, 5, 4, 3, 2, 1])
assert_equal(p.exog_params, [6])
assert_equal(p.ar_params, [5])
assert_equal(p.ma_params, [4])
assert_equal(p.seasonal_ar_params, [3])
assert_equal(p.seasonal_ma_params, [2])
assert_equal(p.sigma2, 1.)
# Override again, now setting params all at once
p.params = [1, 2, 3, 4, 5, 6]
assert_equal(p.params, [1, 2, 3, 4, 5, 6])
assert_equal(p.exog_params, [1])
assert_equal(p.ar_params, [2])
assert_equal(p.ma_params, [3])
assert_equal(p.seasonal_ar_params, [4])
assert_equal(p.seasonal_ma_params, [5])
assert_equal(p.sigma2, 6.)
# Lag polynomials
assert_equal(p.ar_poly.coef, np.r_[1, -2])
assert_equal(p.ma_poly.coef, np.r_[1, 3])
assert_equal(p.seasonal_ar_poly.coef, np.r_[1, 0, 0, 0, -4])
assert_equal(p.seasonal_ma_poly.coef, np.r_[1, 0, 0, 0, 5])
# (1 - a L) (1 - b L^4) = (1 - a L - b L^4 + a b L^5)
assert_equal(p.reduced_ar_poly.coef, np.r_[1, -2, 0, 0, -4, 8])
# (1 + a L) (1 + b L^4) = (1 + a L + b L^4 + a b L^5)
assert_equal(p.reduced_ma_poly.coef, np.r_[1, 3, 0, 0, 5, 15])
def test_set_params_single_nonconsecutive():
# Test setting parameters directly (i.e. we test setting the AR/MA
# parameters by setting the lag polynomials elsewhere)
# Here each type has only a single parameters but has non-consecutive
# lag orders
exog = pd.DataFrame([[0]], columns=['a'])
spec = specification.SARIMAXSpecification(
exog=exog, order=([0, 1], 1, [0, 1]),
seasonal_order=([0, 1], 1, [0, 1], 4))
p = params.SARIMAXParams(spec=spec)
def check(is_stationary='raise', is_invertible='raise'):
assert_(not p.is_complete)
assert_(not p.is_valid)
if is_stationary == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_stationary')
else:
assert_equal(p.is_stationary, is_stationary)
if is_invertible == 'raise':
assert_raises(ValueError, p.__getattribute__, 'is_invertible')
else:
assert_equal(p.is_invertible, is_invertible)
# Set params one at a time, as scalars
p.exog_params = -6.
check()
p.ar_params = -5.
check()
p.ma_params = -4.
check()
p.seasonal_ar_params = -3.
check(is_stationary=False)
p.seasonal_ma_params = -2.
check(is_stationary=False, is_invertible=False)
p.sigma2 = -1.
# Finally, we have a complete set.
assert_(p.is_complete)
# But still not valid
assert_(not p.is_valid)
assert_equal(p.params, [-6, -5, -4, -3, -2, -1])
assert_equal(p.exog_params, [-6])
assert_equal(p.ar_params, [-5])
assert_equal(p.ma_params, [-4])
    assert_equal(p.seasonal_ar_params, [-3])
    assert_equal(p.seasonal_ma_params, [-2])
    assert_equal(p.sigma2, -1.)
"""
Functions implementing the WR and BR-tests in the SpatialCorr algorithm.
Authors: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import math
from collections import defaultdict
from multiprocessing import Process, Manager
from sklearn.metrics.pairwise import euclidean_distances
from statsmodels.stats.multitest import multipletests
def build_normal_log_pdf(mean, cov, cov_inv=None):
"""
A closure for creating a function that calculates the multivariate
normal (MVN) density function.
Parameters
----------
mean: ndarray
N dimensional mean vector.
cov: ndarray
NxN dimensional covariance matrix.
cov_inv: ndarray
NxN dimensional precision matrix (the inverse of the covariance
matrix). If not, provided, it will be calculated automatically.
Returns
-------
normal_log_pdf: function
The MVN density function.
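
    Examples
    --------
    For a 2-D standard normal the log-density at the origin is -log(2*pi):

    >>> f = build_normal_log_pdf(np.zeros(2), np.eye(2))
    >>> print(round(float(f(np.zeros(2))), 4))
    -1.8379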
"""
if cov_inv is None:
cov_inv = np.linalg.inv(cov)
cov_det = np.abs(np.linalg.det(cov))
const = ((-len(mean)/2) * np.log(2*np.pi)) \
- (0.5 * np.log(cov_det))
def normal_log_pdf(x):
cent = x - mean
return const - (0.5 * np.dot(cent, np.dot(cov_inv, cent)))
return normal_log_pdf
def covariance_kernel_estimation(kernel_matrix, X):
"""
Compute the kernel estimate of the covariance matrix at each spatial
location.
Parameters
----------
kernel_matrix: ndarray
NxN matrix representing the spatial kernel (i.e., pairwise weights
between spatial locations)
X: ndarray
GxN expression matrix where G is number of genes and N is number of
spots
Returns
-------
all_covs: ndarray
NxGxG array storing the GxG covariance matrices at the N spots.
"""
# For simplicity
K = kernel_matrix
# Means will be a NxG matrix of the spot-wise
# kernel-computed means
means = (np.matmul(K, X.T).T / np.sum(K, axis=1)).T
# Compute the difference of each spotwise expression
# from its mean
devs = X.T-means
# For each spot, compute the outer product of the deviation
# vector with itself. These correspond to each term in the
# summation used to calculate kernel estimated covariance at
# each spot.
O = np.array([np.outer(d,d) for d in devs])
# This is a bit complicated. For a given spot i, to compute
# its covariance matrix, we need to compute a weighted sum
# of these matrices:
#
# \sum_j K_{ij} (X_j - mean_j)(X_j - mean_j)^T
#
# O is a list of matrices:
#
# (X_1 - mean_1)(X_1 - mean_1)^T ... (X_n - mean_n)(X_n - mean_n)^T
#
# For spot i, we can take this weighted sum via:
#
# np.einsum('i,ijk', K[i], O)
#
# To do this over all spots, we broadcast it via:
#
# np.einsum('...i,ijk', K, O)
#
all_covs = np.einsum('...i,ijk', K, O)
all_covs = (all_covs.T / np.sum(K, axis=1)).T
return all_covs
def compute_kernel_matrix(
df,
bandwidth,
region_key='cluster',
condition_on_region=False,
y_col='row',
x_col='col',
dist_matrix=None
):
"""
Compute the Gaussian kernel matrix between spots.
Parameters
----------
df: DataFrame
A pandas DataFrame storing the coordinates of each spot.
bandwidth: float
The Gaussian kernel bandwidth parameter. Higher values increase the
size of the kernel.
region_key: string, optional (default: 'cluster')
The column in `df` storing the region annotations for ensuring that
the kernel conditions on regions/clusters. Only used if `condition_on_region`
is True.
condition_on_region: boolean, optional (default: False)
If True, compute the kernel conditioned on regions stored in `region_key`.
y_col: string, optional (default: 'row')
The column in `df` storing the y-coordinates for each spot.
x_col: string, optional (default: 'col')
The column in `df' storing the x-coordinates for each spot.
dist_matrix: ndarray, optional (default: None)
An NxN matrix storing the pairwise distances between spots to be used as
input to the kernel. If `None`, Euclidean distances will be computed
automatically.
Returns
-------
kernel_matrix: ndarray
NxN array storing the pairwise weights between spots as computed by the
Gaussian kernel.
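
    Examples
    --------
    A three-spot toy frame using the default coordinate columns:

    >>> df = pd.DataFrame({'row': [0, 0, 1], 'col': [0, 1, 0]})
    >>> compute_kernel_matrix(df, bandwidth=1.0).shape
    (3, 3)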
"""
if dist_matrix is None:
# Get pixel coordinates
coords = np.array(df[[y_col, x_col]])
# Euclidean distance matrix
dist_matrix = euclidean_distances(coords)
# Compute matrix conditioning on cell type
if not condition_on_region:
eta = np.full(dist_matrix.shape, 1)
else:
eta = []
for ct1 in df[region_key]:
r = []
for ct2 in df[region_key]:
r.append(int(ct1 == ct2))
eta.append(r)
eta = np.array(eta)
# Gaussian kernel matrix
kernel_matrix = np.exp(-1 * np.power(dist_matrix,2) / bandwidth**2)
kernel_matrix = np.multiply(kernel_matrix, eta)
return kernel_matrix
def _permute_expression(expr_1, expr_2, n_perms):
expr = np.array([expr_1, expr_2]).T
perms = np.array([
np.random.permutation(expr)
for i in range(n_perms)
])
return perms
def _permute_expression_cond_cell_type(X, ct_to_indices, n_perms):
"""
parameters
----------
X
GxN matrix of expression values where G is the number of genes
and N is the number of spots.
"""
expr = X.T
perms = np.zeros((n_perms, len(expr), len(X.T[0])))
for ct, indices in ct_to_indices.items():
ct_expr = expr[indices]
ct_perms = np.array([
np.random.permutation(ct_expr)
for i in range(n_perms)
])
perms[:,indices,:] = ct_perms
return perms
def _chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def _bh_region_pvals(ct_to_pval):
"""
Adjust region-specific WR p-values using Benjamini Hochberg
(BH) to correct for testing multiple regions.
Parameters
----------
ct_to_pval: dictionary
A dictionary mapping each region to its WR p-value
Returns
-------
ct_to_adj_pval: dictionary
A dictioanry mapping each region to its BH-adjusted
p-value.
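
    Examples
    --------
    >>> adj = _bh_region_pvals({'A': 0.01, 'B': 0.04, 'C': 0.03})
    >>> sorted(adj.keys())
    ['A', 'B', 'C']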
"""
ct_pvals = [(k,v) for k,v in ct_to_pval.items()]
cts = [x[0] for x in ct_pvals]
pvals = [x[1] for x in ct_pvals]
_, adj_pvals, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
ct_to_adj_pval = {
ct: adj
for ct, adj in zip(cts, adj_pvals)
}
return ct_to_adj_pval
def _worker_between(
worker_id,
global_t_nulls,
spotwise_t_nulls,
df_filt,
perms,
kernel_matrix,
ct_to_indices,
null_corrs_filt,
keep_indices,
verbose=10,
compute_spotwise_pvals=False
):
"""
This function computes the test statistic on a chunk of permutations when
run in parallel mode.
"""
if verbose > 1:
print(f"Started worker {worker_id}...")
for perm_i, perm in enumerate(perms):
if verbose > 1 and perm_i % 25 == 0:
print(f"Worker {worker_id}, running permutation {perm_i}/{len(perms)}")
# Compute alternative likelihoods
perm_ll, perm_spot_lls = compute_llrts_between(
df_filt,
perm.T,
kernel_matrix,
ct_to_indices,
#alt_corrs_filt,
null_corrs_filt,
keep_indices
)
# Record the test statistic for this null sample
global_t_nulls.append(perm_ll)
if compute_spotwise_pvals:
spotwise_t_nulls.append(perm_spot_lls)
def _worker_within(
worker_id,
global_t_nulls,
spotwise_t_nulls,
df_filt,
perms,
kernel_matrix,
null_corrs_filt,
keep_indices,
verbose=10,
compute_spotwise_pvals=False
):
"""
This function computes the test statistic on a chunk of permutations when
run in parallel mode.
"""
if verbose > 1:
print(f"Started worker {worker_id}...")
for perm_i, perm in enumerate(perms):
if verbose > 1 and perm_i % 25 == 0:
print(f"Worker {worker_id}, running permutation {perm_i}/{len(perms)}")
# Compute alternative likelihoods
perm_ll, perm_spot_lls = compute_llrts_within(
df_filt,
perm.T,
kernel_matrix,
null_corrs_filt,
keep_indices
)
# Record the test statistic for this null sample
global_t_nulls.append(perm_ll)
if compute_spotwise_pvals:
spotwise_t_nulls.append(perm_spot_lls)
def _adjust_covs_from_corrs(covs, corrs):
"""
Given a list of covariance matrices and a list of
correlation matrices, for each pair, compute the
new covariances based on the based on the diagonal
(i.e. variance) in to match the corresponding
entries in the correlation matrix.
Parameters
----------
corrs
A sequence of Pearson correlation matrices
covs
A sequence of covariance matrices
Returns
-------
A sequence of adjusted covariance matrices
"""
new_covs = []
for cov, corr in zip(covs, corrs):
varss = np.diag(cov)
var_prods = np.sqrt(np.outer(varss, varss))
new_cov = corr * var_prods
        np.fill_diagonal(new_cov, varss)
        new_covs.append(new_cov)
    return new_covs
#!/usr/bin/env python3
from IPython.display import display, Markdown, Latex
import numpy as np
def vector2latex(vector, precision=5, pretext="", display_output=True):
out_latex = "\n$$ " + pretext
out_latex += "\\begin{bmatrix}\n"
for amplitude in vector:
amplitude = np.real_if_close(amplitude)
amp_mod = np.mod(np.real(amplitude), 1)
if (np.isclose(amp_mod, 0) or np.isclose(amp_mod, 1)) and type(amplitude) == np.ndarray:
out_latex += str(int(np.round(amplitude))) + " \\\\\n"
else:
out_latex += '{:.{}f}'.format(amplitude, precision) + " \\\\\n"
    out_latex = out_latex[:-4]  # remove the trailing row break
out_latex += "\end{bmatrix} $$"
if display_output:
display(Markdown(out_latex))
else:
return out_latex
def unitary2latex(unitary, precision=5, pretext="", display_output=True):
out_latex = "\n$$ " + pretext
out_latex += "\\begin{bmatrix}\n"
for row in unitary:
out_latex += "\t" # This makes the latex source more readable
for amplitude in row:
            amplitude = np.real_if_close(amplitude)
            # Format row entries the same way as vector2latex, "&"-separated.
            out_latex += '{:.{}f}'.format(amplitude, precision) + " & "
        out_latex = out_latex[:-3] + " \\\\\n"
    out_latex = out_latex[:-4]  # remove the trailing row break
    out_latex += "\\end{bmatrix} $$"
    if display_output:
        display(Markdown(out_latex))
    else:
        return out_latex
# -*- coding: utf-8 -*-
"""
Created on 6 May 2018
@author: <NAME>
This module inplements the transfermatrix-method for calculating the transmission
through a scattering region
"""
import numpy as np
from numpy import exp, sinh, cosh, linalg
import cmath
from .constants import constants
from ..model import Cell
class TransferMatrix:
""" transfermatrix object
object for caclulating the transfermatrix and the transmission-koefficient
of a scattering region
"""
def __init__(self):
"""Create object and initialize transfermatrix"""
self._M = np.zeros((2, 2))
@property
def M(self):
"""ndarray: transfermatrix of scattering region for the set energy"""
return self._M
@property
def t(self):
"""float: transmission-koefficinet of scattering region for set energy"""
m22 = self._M[1, 1]
if m22 == 0:
return 0
else:
return 1 / abs(m22) ** 2
def null(self):
"""Set to an empty transfermatrix"""
self._M = np.zeros((2, 2))
def barrier(self, e, v, d):
"""Configure the transfermatrix of a rectangular potential barrier
Parameters
----------
e : float
Energy of the particle
v : float
Potential strength of the barrier
d : float
Width of the barrier
"""
v_in = cmath.sqrt(2 * constants.m * (v - e)) / constants.hbar
v_out = cmath.sqrt(2 * constants.m * e) / constants.hbar
self._M = self._transfer_matrix_barrier(v_out, v_in, d)
def cells(self, e, cells):
"""Configure the transfermatrix of multiple unitcells for energy e
Parameters
----------
e : float
Energy of the particle
cells : array_like of Cell or Cell
unitcell(s) of the scattering region
"""
if isinstance(cells, Cell):
cells = [cells]
v_out = cmath.sqrt(2 * constants.m * e) / constants.hbar
self.multiple(v_out, e, cells)
def single(self, v_out, v_in, cell):
"""Configure the transfermatrix of a single unitcell
Parameters
----------
v_out : float
Wavevector in free space
v_in : float
Wavevector in the potential barrier
cell : Cell
unitcell of scattering region
"""
self._M = self._transfer_matrix_unitcell(v_out, v_in, cell)
def multiple(self, v_out, e, cells):
"""Configure the transfermatrix of multiple unitcells
Parameters
----------
v_out : float
Wavevector in free space
e : float
Energy of the particle
cells : array_like of Cell
unitcells of the scattering region
"""
m_total = 1
for cell in cells:
v_in = cmath.sqrt(2 * constants.m * (cell.v - e)) / constants.hbar
m = self._transfer_matrix_unitcell(v_out, v_in, cell)
m_total = np.dot(m, m_total)
self._M = m_total
def diagonalize(self, base_matrix):
"""Represent the current transfermatrix in the base of the given matrix
Parameters
----------
base_matrix : ndarray
Base matrix
"""
q = linalg.eig(base_matrix)[1]
m_diag = np.dot(linalg.inv(q), np.dot(self._M, q))
self._M = m_diag
def transmission_curve(self, xlim, cells, steps=1000):
"""Calculate transmission values for energies in a given range
Parameters
----------
xlim : array_like of float
energy range for calculating transmission curve, consisting of
the start and end value.
cells : array_like of Cell
unitcells of the scattering region
steps : int
number of energy levels to calculate
Returns
-------
data : array_like of ndarray
e and t data of the transmission curve
"""
e_values = np.linspace(*xlim, steps)
t_values = []
for e in e_values:
self.cells(e, cells)
t_values.append(self.t)
return e_values, t_values
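    # Usage (sketch, assuming `cells` is a list of Cell objects describing the
    # scattering region):
    #   tm = TransferMatrix()
    #   e_values, t_values = tm.transmission_curve((0.1, 5.0), cells, steps=500)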
def _transfer_matrix_unitcell(self, v_out, v_in, cell):
"""Calculate the transfer matrix for a unitcell
Parameters
----------
v_out : float
Wavevector in free space
v_in : float
Wavevector in the potential barrier
cell : Cell
unitcell
Returns
-------
M : ndarray
"""
if cell.fr == 0:
m = self._transfer_matrix_barrier(v_out, v_in, cell.d)
else:
m_b = self._transfer_matrix_barrier(v_out, v_in, cell.d)
m_f = self._transfer_matrix_free(v_out, cell.fr)
            m = np.dot(m_f, m_b)
        return m
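    # Minimal sketch (an assumption, not verified author code) of the two
    # helpers used above, based on the standard transfer-matrix formalism.
    def _transfer_matrix_barrier(self, k, kappa, d):
        """Sketch: transfer matrix of a rectangular barrier of width d.
        k is the wavevector outside the barrier, kappa the (possibly complex)
        wavevector inside it; for real kappa this yields the textbook
        transmission t = 1 / (1 + ((k**2 + kappa**2) / (2*k*kappa))**2 * sinh(kappa*d)**2).
        """
        eta = kappa / k - k / kappa
        mu = kappa / k + k / kappa
        m11 = (cosh(kappa * d) + 0.5j * eta * sinh(kappa * d)) * exp(1j * k * d)
        m12 = 0.5j * mu * sinh(kappa * d)
        # the second row follows from time-reversal symmetry; det(M) = 1
        return np.array([[m11, m12], [np.conj(m12), np.conj(m11)]])
    def _transfer_matrix_free(self, k, length):
        """Sketch: free propagation over `length` (a diagonal phase matrix)."""
        return np.array([[exp(1j * k * length), 0], [0, exp(-1j * k * length)]])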
import vtk
import numpy as np
from vtk.util.numpy_support import vtk_to_numpy
from vedo import *
import copy
class MouseInteractorPlacePoint(vtk.vtkInteractorStyleTrackballCamera):
'''
DOTO.
'''
def __init__(self,_sphere1, _sphere2, _render1, _render2, _data1, _data2):
self.AddObserver("LeftButtonPressEvent",self.leftButtonPressEvent)
self.sphere1 = _sphere1
self.sphere2 = _sphere2
self.render1 = _render1
self.render2 = _render2
self.data1 = _data1
self.data2 = _data2
self.render2.SetActiveCamera( self.render1.GetActiveCamera())
self.NumberOfClicks = 0
self.ResetPixelDistance = 5
self.PreviousPosition = [0,0]
return
def nearestVertex(self,pt,data):
d = np.linalg.norm(data-pt, axis = -1)
        ind = np.argmin(d)
        # assumed: callers want the index of the closest vertex
        return ind
from __future__ import print_function
import numpy as np
from scipy.optimize import curve_fit
import os, copy
from openmdao.api import IndepVarComp, Component, Group, Problem
from ccblade.ccblade_component import CCBladePower, CCBladeLoads, CCBladeGeometry
from commonse import gravity, NFREQ
from commonse.csystem import DirectionVector
from commonse.utilities import trapz_deriv, interp_with_deriv
from akima import Akima, akima_interp_with_derivs
import _pBEAM
import _bem # TODO: move to rotoraero
from rotorse import RPM2RS, RS2RPM
from rotorse.rotor_geometry import RotorGeometry, TURBULENCE_CLASS, TURBINE_CLASS, DRIVETRAIN_TYPE
from rotorse.rotor_geometry_yaml import ReferenceBlade
from rotorse.precomp import _precomp
# ---------------------
# Base Components
# ---------------------
class BeamPropertiesBase(Component):
def __init__(self, NPTS):
super(BeamPropertiesBase, self).__init__()
self.add_output('beam:z', val=np.zeros(NPTS), units='m', desc='locations of properties along beam')
self.add_output('beam:EA', val=np.zeros(NPTS), units='N', desc='axial stiffness')
self.add_output('beam:EIxx', val=np.zeros(NPTS), units='N*m**2', desc='edgewise stiffness (bending about :ref:`x-direction of airfoil aligned coordinate system <blade_airfoil_coord>`)')
self.add_output('beam:EIyy', val=np.zeros(NPTS), units='N*m**2', desc='flatwise stiffness (bending about y-direction of airfoil aligned coordinate system)')
self.add_output('beam:EIxy', val=np.zeros(NPTS), units='N*m**2', desc='coupled flap-edge stiffness')
self.add_output('beam:GJ', val=np.zeros(NPTS), units='N*m**2', desc='torsional stiffness (about axial z-direction of airfoil aligned coordinate system)')
self.add_output('beam:rhoA', val=np.zeros(NPTS), units='kg/m', desc='mass per unit length')
self.add_output('beam:rhoJ', val=np.zeros(NPTS), units='kg*m', desc='polar mass moment of inertia per unit length')
self.add_output('beam:Tw_iner', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_output('beam:x_ec', val=np.zeros(NPTS), units='m', desc='x-distance to elastic center from point about which above structural properties are computed (airfoil aligned coordinate system)')
self.add_output('beam:y_ec', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_output('beam:flap_iner', val=np.zeros(NPTS), units='kg/m', desc='Section flap inertia about the Y_G axis per unit length.')
self.add_output('beam:edge_iner', val=np.zeros(NPTS), units='kg/m', desc='Section lag inertia about the X_G axis per unit length')
class StrucBase(Component):
def __init__(self, NPTS):
super(StrucBase, self).__init__()
# all inputs/outputs in airfoil coordinate system
self.add_param('Px_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction at max deflection condition')
self.add_param('Py_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction at max deflection condition')
self.add_param('Pz_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction at max deflection condition')
self.add_param('Px_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction at max strain condition')
self.add_param('Py_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction at max strain condition')
self.add_param('Pz_strain', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction at max strain condition')
self.add_param('Px_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil x-direction for deflection used in generated power curve')
self.add_param('Py_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil y-direction for deflection used in generated power curve')
self.add_param('Pz_pc_defl', val=np.zeros(NPTS), desc='distributed load (force per unit length) in airfoil z-direction for deflection used in generated power curve')
self.add_param('xu_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on upper surface for strain calculation')
self.add_param('xl_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on lower surface for strain calculation')
self.add_param('yu_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on upper surface for strain calculation')
self.add_param('yl_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on lower surface for strain calculation')
self.add_param('xu_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_param('xl_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.add_param('yu_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_param('yl_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.add_param('Mx_damage', val=np.zeros(NPTS), units='N*m', desc='damage equivalent moments about airfoil x-direction')
self.add_param('My_damage', val=np.zeros(NPTS), units='N*m', desc='damage equivalent moments about airfoil y-direction')
self.add_param('strain_ult_spar', val=0.0, desc='ultimate strain in spar cap')
        self.add_param('strain_ult_te', val=0.0, desc='ultimate strain in trailing-edge panels')
self.add_param('gamma_fatigue', val=0.0, desc='safety factor for fatigue')
self.add_param('m_damage', val=0.0, desc='slope of S-N curve for fatigue analysis')
self.add_param('lifetime', val=0.0, units='year', desc='number of years used in fatigue analysis')
self.add_param('beam:z', val=np.zeros(NPTS), units='m', desc='locations of properties along beam')
self.add_param('beam:EA', val=np.zeros(NPTS), units='N', desc='axial stiffness')
self.add_param('beam:EIxx', val=np.zeros(NPTS), units='N*m**2', desc='edgewise stiffness (bending about :ref:`x-direction of airfoil aligned coordinate system <blade_airfoil_coord>`)')
self.add_param('beam:EIyy', val=np.zeros(NPTS), units='N*m**2', desc='flatwise stiffness (bending about y-direction of airfoil aligned coordinate system)')
self.add_param('beam:EIxy', val=np.zeros(NPTS), units='N*m**2', desc='coupled flap-edge stiffness')
self.add_param('beam:GJ', val=np.zeros(NPTS), units='N*m**2', desc='torsional stiffness (about axial z-direction of airfoil aligned coordinate system)')
self.add_param('beam:rhoA', val=np.zeros(NPTS), units='kg/m', desc='mass per unit length')
self.add_param('beam:rhoJ', val=np.zeros(NPTS), units='kg*m', desc='polar mass moment of inertia per unit length')
self.add_param('beam:x_ec', val=np.zeros(NPTS), units='m', desc='x-distance to elastic center from point about which above structural properties are computed (airfoil aligned coordinate system)')
self.add_param('beam:y_ec', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
# outputs
self.add_output('blade_mass', val=0.0, units='kg', desc='mass of one blades')
self.add_output('blade_moment_of_inertia', val=0.0, units='kg*m**2', desc='out of plane moment of inertia of a blade')
self.add_output('freq', val=np.zeros(NFREQ), units='Hz', desc='first nF natural frequencies of blade')
self.add_output('dx_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil x-direction under max deflection loading')
self.add_output('dy_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil y-direction under max deflection loading')
self.add_output('dz_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil z-direction under max deflection loading')
self.add_output('dx_pc_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil x-direction under power curve loading')
self.add_output('dy_pc_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil y-direction under power curve loading')
self.add_output('dz_pc_defl', val=np.zeros(NPTS), desc='deflection of blade section in airfoil z-direction under power curve loading')
self.add_output('strainU_spar', val=np.zeros(NPTS), desc='strain in spar cap on upper surface at location xu,yu_strain with loads P_strain')
self.add_output('strainL_spar', val=np.zeros(NPTS), desc='strain in spar cap on lower surface at location xl,yl_strain with loads P_strain')
self.add_output('strainU_te', val=np.zeros(NPTS), desc='strain in trailing-edge panels on upper surface at location xu,yu_te with loads P_te')
self.add_output('strainL_te', val=np.zeros(NPTS), desc='strain in trailing-edge panels on lower surface at location xl,yl_te with loads P_te')
self.add_output('damageU_spar', val=np.zeros(NPTS), desc='fatigue damage on upper surface in spar cap')
self.add_output('damageL_spar', val=np.zeros(NPTS), desc='fatigue damage on lower surface in spar cap')
self.add_output('damageU_te', val=np.zeros(NPTS), desc='fatigue damage on upper surface in trailing-edge panels')
self.add_output('damageL_te', val=np.zeros(NPTS), desc='fatigue damage on lower surface in trailing-edge panels')
'''
class aeroloads(Component):
def __init__(self):
super(aeroloads, self).__init__()
self.add_param('r', val=0.0, units='m', desc='radial positions along blade going toward tip')
self.add_param('Px', val=0.0, units='N/m', desc='distributed loads in blade-aligned x-direction')
self.add_param('Py', val=0.0, units='N/m', desc='distributed loads in blade-aligned y-direction')
self.add_param('Pz', val=0.0, units='N/m', desc='distributed loads in blade-aligned z-direction')
self.add_param('V', val=0.0, units='m/s', desc='hub height wind speed')
self.add_param('Omega', val=0.0, units='rpm', desc='rotor rotation speed')
self.add_param('pitch', val=0.0, units='deg', desc='pitch angle')
self.add_param('T', val=0.0, units='deg', desc='azimuthal angle')
'''
# ---------------------
# Components
# ---------------------
class ResizeCompositeSection(Component):
def __init__(self, NPTS):
super(ResizeCompositeSection, self).__init__()
self.add_param('chord', val=np.zeros(NPTS), units='m', desc='structural chord distribution')
self.add_param('sparT', val=np.zeros(NPTS), units='m', desc='structural spar cap thickness distribution')
self.add_param('teT', val=np.zeros(NPTS), units='m', desc='structural trailing-edge panel thickness distribution')
self.add_param('upperCS_in', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for upper surface', pass_by_obj=True)
self.add_param('lowerCS_in', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for lower surface', pass_by_obj=True)
self.add_param('websCS_in', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for shear webs', pass_by_obj=True)
self.add_param('chord_ref', val=np.zeros(NPTS), desc='Chord distribution for reference section, thickness of structural layup scaled with reference thickness (fixed t/c)')
self.add_param('sector_idx_strain_spar_ss', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for spar (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_spar_ps', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for spar (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_te_ss', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for trailing edge (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_te_ps', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for trailing edge (PreComp definition of sector)', pass_by_obj=True)
# out
self.add_output('upperCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for upper surface', pass_by_obj=True)
self.add_output('lowerCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for lower surface', pass_by_obj=True)
self.add_output('websCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for shear webs', pass_by_obj=True)
self.deriv_options['type'] = 'fd'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_size'] = 1e-5
def solve_nonlinear(self, params, unknowns, resids):
chord = params['chord']
sparT = params['sparT']
teT = params['teT']
upperCS = params['upperCS_in']
lowerCS = params['lowerCS_in']
websCS = params['websCS_in']
strain_idx_spar_ss = params['sector_idx_strain_spar_ss']
strain_idx_spar_ps = params['sector_idx_strain_spar_ps']
strain_idx_te_ss = params['sector_idx_strain_te_ss']
strain_idx_te_ps = params['sector_idx_strain_te_ps']
# scale all thicknesses with airfoil thickness
# TODO: remove fixed t/c assumption
# factor = t_str / tref
factor = chord / params['chord_ref'] # same as thickness ratio for constant t/c
for i in range(chord.size):
upperCS[i].t = [m*factor[i] for m in upperCS[i].t]
lowerCS[i].t = [m*factor[i] for m in lowerCS[i].t]
websCS[i].t = [m*factor[i] for m in websCS[i].t]
idx_spar_ss = strain_idx_spar_ss[i]
idx_te_ss = strain_idx_te_ss[i]
idx_spar_ps = strain_idx_spar_ps[i]
idx_te_ps = strain_idx_te_ps[i]
# upper and lower have same thickness for this design
tspar_ss = np.sum(upperCS[i].t[idx_spar_ss])
tte_ss = np.sum(upperCS[i].t[idx_te_ss])
tspar_ps = np.sum(upperCS[i].t[idx_spar_ps])
tte_ps = np.sum(upperCS[i].t[idx_te_ps])
upperCS[i].t[idx_spar_ss] *= sparT[i]/tspar_ss
lowerCS[i].t[idx_spar_ps] *= sparT[i]/tspar_ps
upperCS[i].t[idx_te_ss] *= teT[i]/tte_ss
lowerCS[i].t[idx_te_ps] *= teT[i]/tte_ps
unknowns['upperCS'] = upperCS
unknowns['lowerCS'] = lowerCS
unknowns['websCS'] = websCS
class PreCompSections(BeamPropertiesBase):
def __init__(self, NPTS):
super(PreCompSections, self).__init__(NPTS)
self.add_param('r', val=np.zeros(NPTS), units='m', desc='radial positions. r[0] should be the hub location \
while r[-1] should be the blade tip. Any number \
of locations can be specified between these in ascending order.')
self.add_param('chord', val=np.zeros(NPTS), units='m', desc='array of chord lengths at corresponding radial positions')
self.add_param('theta', val=np.zeros(NPTS), units='deg', desc='array of twist angles at corresponding radial positions. \
(positive twist decreases angle of attack)')
self.add_param('le_location', val=np.zeros(NPTS), desc='Leading-edge positions from a reference blade axis (usually blade pitch axis). Locations are normalized by the local chord length. Positive in -x direction for airfoil-aligned coordinate system')
self.add_param('materials', val=np.zeros(NPTS), desc='material properties of composite materials', pass_by_obj=True)
self.add_param('upperCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for upper surface', pass_by_obj=True)
self.add_param('lowerCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for lower surface', pass_by_obj=True)
self.add_param('websCS', val=np.zeros(NPTS), desc='list of CompositeSection objections defining the properties for shear webs', pass_by_obj=True)
self.add_param('profile', val=np.zeros(NPTS), desc='list of CompositeSection profiles', pass_by_obj=True)
self.add_param('sector_idx_strain_spar_ps', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for spar (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_spar_ss', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for spar (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_te_ps', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for trailing edge (PreComp definition of sector)', pass_by_obj=True)
self.add_param('sector_idx_strain_te_ss', val=np.zeros(NPTS, dtype=np.int_), desc='Index of sector for trailing edge (PreComp definition of sector)', pass_by_obj=True)
self.add_output('eps_crit_spar', val=np.zeros(NPTS), desc='critical strain in spar from panel buckling calculation')
self.add_output('eps_crit_te', val=np.zeros(NPTS), desc='critical strain in trailing-edge panels from panel buckling calculation')
self.add_output('xu_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on upper surface for strain calculation')
self.add_output('xl_strain_spar', val=np.zeros(NPTS), desc='x-position of midpoint of spar cap on lower surface for strain calculation')
self.add_output('yu_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on upper surface for strain calculation')
self.add_output('yl_strain_spar', val=np.zeros(NPTS), desc='y-position of midpoint of spar cap on lower surface for strain calculation')
self.add_output('xu_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_output('xl_strain_te', val=np.zeros(NPTS), desc='x-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.add_output('yu_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on upper surface for strain calculation')
self.add_output('yl_strain_te', val=np.zeros(NPTS), desc='y-position of midpoint of trailing-edge panel on lower surface for strain calculation')
self.deriv_options['type'] = 'fd'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_size'] = 1e-5
def criticalStrainLocations(self, params, sector_idx_strain_ss, sector_idx_strain_ps, x_ec_nose, y_ec_nose):
chord = params['chord']
upperCS = params['upperCS']
lowerCS = params['lowerCS']
profile = params['profile']
NPTS = chord.size
# find corresponding locations on airfoil at midpoint of sector
xun = np.zeros(NPTS)
xln = np.zeros(NPTS)
yun = np.zeros(NPTS)
yln = np.zeros(NPTS)
for i in range(NPTS):
csU = upperCS[i]
csL = lowerCS[i]
pf = profile[i]
idx_ss = sector_idx_strain_ss[i]
idx_ps = sector_idx_strain_ps[i]
            if idx_ss is None:
xun[i] = 0.
xln[i] = 0.
yun[i] = 0.
yln[i] = 0.
else:
xun[i] = 0.5*(csU.loc[idx_ss] + csU.loc[idx_ss+1])
xln[i] = 0.5*(csL.loc[idx_ps] + csL.loc[idx_ps+1])
yun[i] = np.interp(xun[i], pf.x, pf.yu)
yln[i] = np.interp(xln[i], pf.x, pf.yl)
# make dimensional and define relative to elastic center
xu = xun*chord - x_ec_nose
xl = xln*chord - x_ec_nose
yu = yun*chord - y_ec_nose
yl = yln*chord - y_ec_nose
# switch to airfoil coordinate system
xu, yu = yu, xu
xl, yl = yl, xl
return xu, xl, yu, yl
def panelBucklingStrain(self, params, sector_idx_strain_ss):
"""
see chapter on Structural Component Design Techniques from <NAME>
section 6.2: Design of composite panels
assumes: large aspect ratio, simply supported, uniaxial compression, flat rectangular plate
"""
# rename
chord = params['chord']
CS_list = params['upperCS']
# initialize
nsec = len(chord)
eps_crit = np.zeros(nsec)
for i in range(nsec):
cs = CS_list[i]
sector_idx = sector_idx_strain_ss[i]
            if sector_idx is None:
eps_crit[i] = 0.
else:
# chord-wise length of sector
sector_length = chord[i] * (cs.loc[sector_idx+1] - cs.loc[sector_idx])
# get matrices
A, B, D, totalHeight = cs.compositeMatrices(sector_idx)
E = cs.effectiveEAxial(sector_idx)
D1 = D[0, 0]
D2 = D[1, 1]
D3 = D[0, 1] + 2*D[2, 2]
# use empirical formula
Nxx = 2 * (np.pi/sector_length)**2 * (np.sqrt(D1*D2) + D3)
eps_crit[i] = - Nxx / totalHeight / E
return eps_crit
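    # Sanity check (sketch): for a layup with D1 = D2 = D3 = D, the empirical
    # formula gives Nxx = 2*(pi/b)**2*(sqrt(D*D) + D) = 4*D*(pi/b)**2, i.e. the
    # classical buckling load of a long, simply supported plate (k = 4 in
    # Nxx = k*pi**2*D/b**2).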
def solve_nonlinear(self, params, unknowns, resids):
r = params['r']
chord = params['chord']
mat = params['materials']
leLoc = params['le_location']
upperCS = params['upperCS']
lowerCS = params['lowerCS']
websCS = params['websCS']
profile = params['profile']
theta = params['theta']
strain_idx_spar_ss = params['sector_idx_strain_spar_ss']
strain_idx_spar_ps = params['sector_idx_strain_spar_ps']
strain_idx_te_ss = params['sector_idx_strain_te_ss']
strain_idx_te_ps = params['sector_idx_strain_te_ps']
# radial discretization
nsec = len(r)
# initialize variables
beam_z = r
beam_EA = np.zeros(nsec)
beam_EIxx = np.zeros(nsec)
beam_EIyy = np.zeros(nsec)
beam_EIxy = np.zeros(nsec)
beam_GJ = np.zeros(nsec)
beam_rhoA = np.zeros(nsec)
beam_rhoJ = np.zeros(nsec)
beam_Tw_iner = np.zeros(nsec)
beam_flap_iner = np.zeros(nsec)
beam_edge_iner = np.zeros(nsec)
# distance to elastic center from point about which structural properties are computed
# using airfoil coordinate system
beam_x_ec = np.zeros(nsec)
beam_y_ec = np.zeros(nsec)
# distance to elastic center from airfoil nose
# using profile coordinate system
x_ec_nose = np.zeros(nsec)
y_ec_nose = np.zeros(nsec)
csU = upperCS
csL = lowerCS
csW = websCS
# twist rate
th_prime = _precomp.tw_rate(r, theta)
# arrange materials into list
n = len(mat)
E1 = [0]*n
E2 = [0]*n
G12 = [0]*n
nu12 = [0]*n
rho = [0]*n
for i in range(n):
E1[i] = mat[i].E1
E2[i] = mat[i].E2
G12[i] = mat[i].G12
nu12[i] = mat[i].nu12
rho[i] = mat[i].rho
# for i in range(nsec):
# print(csW[i], type(csW[i]))
for i in range(nsec):
xnode, ynode = profile[i]._preCompFormat()
locU, n_laminaU, n_pliesU, tU, thetaU, mat_idxU = csU[i]._preCompFormat()
locL, n_laminaL, n_pliesL, tL, thetaL, mat_idxL = csL[i]._preCompFormat()
locW, n_laminaW, n_pliesW, tW, thetaW, mat_idxW = csW[i]._preCompFormat()
nwebs = len(locW)
# address a bug in f2py (need to pass in length 1 arrays even though they are not used)
if nwebs == 0:
locW = [0]
n_laminaW = [0]
n_pliesW = [0]
tW = [0]
thetaW = [0]
mat_idxW = [0]
try:
results = _precomp.properties(chord[i], theta[i],
th_prime[i], leLoc[i],
xnode, ynode, E1, E2, G12, nu12, rho,
locU, n_laminaU, n_pliesU, tU, thetaU, mat_idxU,
locL, n_laminaL, n_pliesL, tL, thetaL, mat_idxL,
nwebs, locW, n_laminaW, n_pliesW, tW, thetaW, mat_idxW)
except:
print(i)
# print('chord[i]', chord[i])
# print('theta[i]', theta[i])
# print('th_prime[i]', th_prime[i])
# print('leLoc[i]', leLoc[i])
# print('xnode', xnode)
# print('ynode', ynode)
# print('E1', E1)
# print('E2', E2)
# print('G12', G12)
# print('nu12', nu12)
# print('rho', rho)
# print('locU', locU)
# print('n_laminaU', n_laminaU)
# print('n_pliesU', n_pliesU)
# print('tU', tU)
# print('thetaU', thetaU)
# print('mat_idxU', mat_idxU)
# print('locL', locL)
# print('n_laminaL', n_laminaL)
# print('n_pliesL', n_pliesL)
# print('tL', tL)
# print('thetaL', thetaL)
# print('mat_idxL', mat_idxL)
# print('nwebs', nwebs)
# print('locW', locW)
# print('n_laminaW', n_laminaW)
# print('n_pliesW', n_pliesW)
# print('tW', tW)
# print('thetaW', thetaW)
# print('mat_idxW ', mat_idxW )
import matplotlib.pyplot as plt
plt.plot(xnode,ynode)
plt.axis('equal')
plt.savefig('debugging_%d.png'%i)
# plt.show()
results = _precomp.properties(chord[i], theta[i],
th_prime[i], leLoc[i],
xnode, ynode, E1, E2, G12, nu12, rho,
locU, n_laminaU, n_pliesU, tU, thetaU, mat_idxU,
locL, n_laminaL, n_pliesL, tL, thetaL, mat_idxL,
nwebs, locW, n_laminaW, n_pliesW, tW, thetaW, mat_idxW)
beam_EIxx[i] = results[1] # EIedge
beam_EIyy[i] = results[0] # EIflat
beam_GJ[i] = results[2]
beam_EA[i] = results[3]
beam_EIxy[i] = results[4] # EIflapedge
beam_x_ec[i] = results[12] - results[10]
beam_y_ec[i] = results[13] - results[11]
beam_rhoA[i] = results[14]
            beam_rhoJ[i] = results[15] + results[16]  # perpendicular axis theorem
beam_Tw_iner[i] = results[17]
beam_flap_iner[i] = results[15]
beam_edge_iner[i] = results[16]
x_ec_nose[i] = results[13] + leLoc[i]*chord[i]
            y_ec_nose[i] = results[12]  # switched because of the coordinate system used
unknowns['beam:z'] = beam_z
unknowns['beam:EIxx'] = beam_EIxx
unknowns['beam:EIyy'] = beam_EIyy
unknowns['beam:GJ'] = beam_GJ
unknowns['beam:EA'] = beam_EA
unknowns['beam:EIxy'] = beam_EIxy
unknowns['beam:x_ec'] = beam_x_ec
unknowns['beam:y_ec'] = beam_y_ec
unknowns['beam:rhoA'] = beam_rhoA
unknowns['beam:rhoJ'] = beam_rhoJ
unknowns['beam:Tw_iner'] = beam_Tw_iner
unknowns['beam:flap_iner'] = beam_flap_iner
unknowns['beam:edge_iner'] = beam_edge_iner
unknowns['eps_crit_spar'] = self.panelBucklingStrain(params, strain_idx_spar_ss)
unknowns['eps_crit_te'] = self.panelBucklingStrain(params, strain_idx_te_ss)
xu_strain_spar, xl_strain_spar, yu_strain_spar, yl_strain_spar = self.criticalStrainLocations(params, strain_idx_spar_ss, strain_idx_spar_ps, x_ec_nose, y_ec_nose)
xu_strain_te, xl_strain_te, yu_strain_te, yl_strain_te = self.criticalStrainLocations(params, strain_idx_te_ss, strain_idx_te_ps, x_ec_nose, y_ec_nose)
unknowns['xu_strain_spar'] = xu_strain_spar
unknowns['xl_strain_spar'] = xl_strain_spar
unknowns['yu_strain_spar'] = yu_strain_spar
unknowns['yl_strain_spar'] = yl_strain_spar
unknowns['xu_strain_te'] = xu_strain_te
unknowns['xl_strain_te'] = xl_strain_te
unknowns['yu_strain_te'] = yu_strain_te
unknowns['yl_strain_te'] = yl_strain_te
class BladeCurvature(Component):
def __init__(self, NPTS):
super(BladeCurvature, self).__init__()
self.add_param('r', val=np.zeros(NPTS), units='m', desc='location in blade z-coordinate')
self.add_param('precurve', val=np.zeros(NPTS), units='m', desc='location in blade x-coordinate')
self.add_param('presweep', val=np.zeros(NPTS), units='m', desc='location in blade y-coordinate')
self.add_param('precone', val=0.0, units='deg', desc='precone angle')
self.add_output('totalCone', val=np.zeros(NPTS), units='deg', desc='total cone angle from precone and curvature')
self.add_output('x_az', val=np.zeros(NPTS), units='m', desc='location of blade in azimuth x-coordinate system')
self.add_output('y_az', val=np.zeros(NPTS), units='m', desc='location of blade in azimuth y-coordinate system')
self.add_output('z_az', val=np.zeros(NPTS), units='m', desc='location of blade in azimuth z-coordinate system')
self.add_output('s', val=np.zeros(NPTS), units='m', desc='cumulative path length along blade')
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_calc'] = 'relative'
def solve_nonlinear(self, params, unknowns, resids):
# self.x_az, self.y_az, self.z_az, cone, s = \
# _bem.definecurvature(self.r, self.precurve, self.presweep, 0.0)
self.r = params['r']
self.precurve = params['precurve']
self.presweep = params['presweep']
self.precone = params['precone']
n = len(self.r)
dx_dx = np.eye(3*n)
self.x_az, x_azd, self.y_az, y_azd, self.z_az, z_azd, \
cone, coned, s, sd = _bem.definecurvature_dv2(self.r, dx_dx[:, :n],
self.precurve, dx_dx[:, n:2*n], self.presweep, dx_dx[:, 2*n:], 0.0, np.zeros(3*n))
self.totalCone = self.precone + np.degrees(cone)
self.s = self.r[0] + s
unknowns['totalCone'] = self.totalCone
unknowns['x_az'] = self.x_az
unknowns['y_az'] = self.y_az
unknowns['z_az'] = self.z_az
unknowns['s'] = self.s
dxaz_dr = x_azd[:n, :].T
dxaz_dprecurve = x_azd[n:2*n, :].T
dxaz_dpresweep = x_azd[2*n:, :].T
dyaz_dr = y_azd[:n, :].T
dyaz_dprecurve = y_azd[n:2*n, :].T
dyaz_dpresweep = y_azd[2*n:, :].T
dzaz_dr = z_azd[:n, :].T
dzaz_dprecurve = z_azd[n:2*n, :].T
dzaz_dpresweep = z_azd[2*n:, :].T
dcone_dr = np.degrees(coned[:n, :]).T
dcone_dprecurve = np.degrees(coned[n:2*n, :]).T
dcone_dpresweep = np.degrees(coned[2*n:, :]).T
ds_dr = sd[:n, :].T
ds_dr[:, 0] += 1
ds_dprecurve = sd[n:2*n, :].T
ds_dpresweep = sd[2*n:, :].T
J = {}
J['x_az', 'r'] = dxaz_dr
J['x_az', 'precurve'] = dxaz_dprecurve
J['x_az', 'presweep'] = dxaz_dpresweep
J['x_az', 'precone'] = np.zeros(n)
J['y_az', 'r'] = dyaz_dr
J['y_az', 'precurve'] = dyaz_dprecurve
J['y_az', 'presweep'] = dyaz_dpresweep
J['y_az', 'precone'] = np.zeros(n)
J['z_az', 'r'] = dzaz_dr
J['z_az', 'precurve'] = dzaz_dprecurve
J['z_az', 'presweep'] = dzaz_dpresweep
J['z_az', 'precone'] = np.zeros(n)
J['totalCone', 'r'] = dcone_dr
J['totalCone', 'precurve'] = dcone_dprecurve
J['totalCone', 'presweep'] = dcone_dpresweep
J['totalCone', 'precone'] = np.ones(n)
J['s', 'r'] = ds_dr
J['s', 'precurve'] = ds_dprecurve
J['s', 'presweep'] = ds_dpresweep
J['s', 'precone'] = np.zeros(n)
self.J = J
def list_deriv_vars(self):
inputs = ('r', 'precurve', 'presweep', 'precone')
outputs = ('x_az', 'y_az', 'z_az', 'totalCone', 's')
return inputs, outputs
def linearize(self, params, unknowns, resids):
return self.J
class CurveFEM(Component):
def __init__(self, NPTS):
super(CurveFEM, self).__init__()
"""natural frequencies for curved blades"""
self.add_param('Omega', val=0.0, units='rpm', desc='rotor rotation frequency')
self.add_param('beam:z', val=np.zeros(NPTS), units='m', desc='locations of properties along beam')
self.add_param('beam:EA', val=np.zeros(NPTS), units='N', desc='axial stiffness')
self.add_param('beam:EIxx', val=np.zeros(NPTS), units='N*m**2', desc='edgewise stiffness (bending about :ref:`x-direction of airfoil aligned coordinate system <blade_airfoil_coord>`)')
self.add_param('beam:EIyy', val=np.zeros(NPTS), units='N*m**2', desc='flatwise stiffness (bending about y-direction of airfoil aligned coordinate system)')
self.add_param('beam:EIxy', val=np.zeros(NPTS), units='N*m**2', desc='coupled flap-edge stiffness')
self.add_param('beam:GJ', val=np.zeros(NPTS), units='N*m**2', desc='torsional stiffness (about axial z-direction of airfoil aligned coordinate system)')
self.add_param('beam:rhoA', val=np.zeros(NPTS), units='kg/m', desc='mass per unit length')
self.add_param('beam:rhoJ', val=np.zeros(NPTS), units='kg*m', desc='polar mass moment of inertia per unit length')
self.add_param('beam:x_ec', val=np.zeros(NPTS), units='m', desc='x-distance to elastic center from point about which above structural properties are computed (airfoil aligned coordinate system)')
self.add_param('beam:y_ec', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_param('beam:Tw_iner', val=np.zeros(NPTS), units='m', desc='y-distance to elastic center from point about which above structural properties are computed')
self.add_param('beam:flap_iner', val=np.zeros(NPTS), units='kg/m', desc='Section flap inertia about the Y_G axis per unit length.')
self.add_param('beam:edge_iner', val=np.zeros(NPTS), units='kg/m', desc='Section lag inertia about the X_G axis per unit length')
self.add_param('theta', val=np.zeros(NPTS), units='deg', desc='structural twist distribution')
self.add_param('precurve', val=np.zeros(NPTS), units='m', desc='structural precuve (see FAST definition)')
self.add_param('presweep', val=np.zeros(NPTS), units='m', desc='structural presweep (see FAST definition)')
self.add_output('freq', val=np.zeros(NFREQ), units='Hz', desc='first nF natural frequencies')
self.add_output('modes_coef', val=np.zeros((3, 5)), desc='mode shapes as 6th order polynomials, in the format accepted by ElastoDyn, [[c_x2, c_],..]')
# self.add_output('')
self.deriv_options['type'] = 'fd'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_size'] = 1e-5
def solve_nonlinear(self, params, unknowns, resids):
mycurve = _pBEAM.CurveFEM(params['Omega'], params['beam:Tw_iner'], params['beam:z'], params['precurve'], params['presweep'], params['beam:rhoA'], True)
# mycurve = _pBEAM.CurveFEM(params['Omega'], params['theta'], params['beam:z'], params['precurve'], params['presweep'], params['beam:rhoA'], True)
n = len(params['beam:z'])
freq, eig_vec = mycurve.frequencies(params['beam:EA'], params['beam:EIxx'], params['beam:EIyy'], params['beam:GJ'], params['beam:rhoJ'], n)
unknowns['freq'] = freq[:NFREQ]
# Parse eigen vectors
R = params['beam:z']
R = np.asarray([(Ri-R[0])/(R[-1]-R[0]) for Ri in R])
ndof = 6
flap = np.zeros((NFREQ, n))
edge = np.zeros((NFREQ, n))
for i in range(NFREQ):
eig_vec_i = eig_vec[:,i]
for j in range(n):
flap[i,j] = eig_vec_i[0+j*ndof]
edge[i,j] = eig_vec_i[1+j*ndof]
# Mode shape polynomial fit
def mode_fit(x, a, b, c, d, e):
return a*x**2. + b*x**3. + c*x**4. + d*x**5. + e*x**6.
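        # Normalizing each fitted coefficient vector to sum to 1 (below) pins
        # the mode shape to unit deflection at the blade tip, since
        # mode_fit(1, *coef) = a + b + c + d + e.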
# First Flapwise
coef, pcov = curve_fit(mode_fit, R, flap[0,:])
coef_norm = [c/sum(coef) for c in coef]
unknowns['modes_coef'][0,:] = coef_norm
# Second Flapwise
coef, pcov = curve_fit(mode_fit, R, flap[1,:])
coef_norm = [c/sum(coef) for c in coef]
unknowns['modes_coef'][1,:] = coef_norm
# First Edgewise
coef, pcov = curve_fit(mode_fit, R, edge[0,:])
coef_norm = [c/sum(coef) for c in coef]
unknowns['modes_coef'][2,:] = coef_norm
# # temp
# from bmodes import BModes_tools
# r = np.asarray([(ri-params['beam:z'][0])/(params['beam:z'][-1]-params['beam:z'][0]) for ri in params['beam:z']])
# prop = np.column_stack((r, params['theta'], params['beam:Tw_iner'], params['beam:rhoA'], params['beam:flap_iner'], params['beam:edge_iner'], params['beam:EIyy'], \
# params['beam:EIxx'], params['beam:GJ'], params['beam:EA'], np.zeros_like(r), np.zeros_like(r), np.zeros_like(r)))
# bm = BModes_tools()
# bm.setup.radius = params['beam:z'][-1]
# bm.setup.hub_rad = params['beam:z'][0]
# bm.setup.precone = -2.5
# bm.prop = prop
# bm.exe_BModes = 'C:/Users/egaertne/WT_Codes/bModes/BModes.exe'
# bm.execute()
# print(bm.freq)
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(12., 6.), sharex=True, sharey=True)
# # fig.subplots_adjust(bottom=0.2, top=0.9)
# fig.subplots_adjust(bottom=0.15, left=0.1, hspace=0, wspace=0)
# i = 0
# k_flap = bm.flap_disp[i,-1]/flap[i,-1]
# k_edge = bm.lag_disp[i,-1]/edge[i,-1]
# ax[0,0].plot(R, flap[i,:]*k_flap ,'k',label='CurveFEM')
# ax[0,0].plot(bm.r[i,:], bm.flap_disp[i,:],'bx',label='BModes')
# ax[0,0].set_ylabel('Flapwise Disp.')
# ax[0,0].set_title('1st Mode')
# ax[1,0].plot(R, edge[i,:]*k_edge ,'k')
# ax[1,0].plot(bm.r[i,:], bm.lag_disp[i,:],'bx')
# ax[1,0].set_ylabel('Edgewise Disp.')
# i = 1
# k_flap = bm.flap_disp[i,-1]/flap[i,-1]
# k_edge = bm.lag_disp[i,-1]/edge[i,-1]
# ax[0,1].plot(R, flap[i,:]*k_flap ,'k')
# ax[0,1].plot(bm.r[i,:], bm.flap_disp[i,:],'bx')
# ax[0,1].set_title('2nd Mode')
# ax[1,1].plot(R, edge[i,:]*k_edge ,'k')
# ax[1,1].plot(bm.r[i,:], bm.lag_disp[i,:],'bx')
# i = 2
# k_flap = bm.flap_disp[i,-1]/flap[i,-1]
# k_edge = bm.lag_disp[i,-1]/edge[i,-1]
# ax[0,2].plot(R, flap[i,:]*k_flap ,'k')
# ax[0,2].plot(bm.r[i,:], bm.flap_disp[i,:],'bx')
# ax[0,2].set_title('3rd Mode')
# ax[1,2].plot(R, edge[i,:]*k_edge ,'k')
# ax[1,2].plot(bm.r[i,:], bm.lag_disp[i,:],'bx')
# fig.legend(loc='lower center', ncol=2)
# i = 3
# k_flap = bm.flap_disp[i,-1]/flap[i,-1]
# k_edge = bm.lag_disp[i,-1]/edge[i,-1]
# ax[0,3].plot(R, flap[i,:]*k_flap ,'k')
# ax[0,3].plot(bm.r[i,:], bm.flap_disp[i,:],'bx')
# ax[0,3].set_title('4th Mode')
# ax[1,3].plot(R, edge[i,:]*k_edge ,'k')
# ax[1,3].plot(bm.r[i,:], bm.lag_disp[i,:],'bx')
# fig.legend(loc='lower center', ncol=2)
# fig.text(0.5, 0.075, 'Blade Spanwise Position, $r/R$', ha='center')
# (n,m)=np.shape(ax)
# for i in range(n):
# for j in range(m):
# ax[i,j].tick_params(axis='both', which='major', labelsize=8)
# ax[i,j].grid(True, linestyle=':')
# plt.show()
class RotorWithpBEAM(StrucBase):
def __init__(self, NPTS):
super(RotorWithpBEAM, self).__init__(NPTS)
self.deriv_options['type'] = 'fd'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_size'] = 1e-5
self.EI11 = None
self.EI22 = None
self.EA = None
self.ca = None
self.sa = None
def principalCS(self, EIyy, EIxx, y_ec, x_ec, EA, EIxy):
# rename (with swap of x, y for profile c.s.)
EIxx , EIyy = EIyy , EIxx
x_ec , y_ec = y_ec , x_ec
self.EA = EA
EIxy = EIxy
# translate to elastic center
EIxx -= y_ec**2*EA
EIyy -= x_ec**2*EA
EIxy -= x_ec*y_ec*EA
# get rotation angle
alpha = 0.5*np.arctan2(2*EIxy, EIyy-EIxx)
self.EI11 = EIxx - EIxy*np.tan(alpha)
self.EI22 = EIyy + EIxy*np.tan(alpha)
# get moments and positions in principal axes
        self.ca = np.cos(alpha)
        self.sa = np.sin(alpha)
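        # alpha is chosen so the product of inertia vanishes in the rotated
        # frame, making EI11 and EI22 the principal bending stiffnesses.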
from keras.utils.visualize_util import plot
from matplotlib import pyplot
import numpy as np
import cv2
class Visualization:
# members
def __init__(self, output_path):
self.output_path = output_path;
def visualizeModelArchitecture(self, model):
print("save model...");
plot(model, to_file=self.output_path + "modelVisualization.png", show_shapes=True);
def plotOriginalImageset(self, dataset):
print("plot 9 images...");
        # plot 9 images
#print(samples.shape);
#print(samples[0, 0]);
# create a grid of 3x3 images
for i in range(0, 9):
sample = (dataset.sample(i));
#sample = np.array(sample);
pyplot.subplot(330 + 1 + i);
pyplot.imshow(cv2.cvtColor(sample[0], cv2.COLOR_BGR2RGB));
# show the plot
pyplot.show();
def plotZeroCenteredImageset(self, dataset):
print("plot 9 images...");
        # plot 9 images
#print(samples.shape);
#print(samples[0, 0]);
# create a grid of 3x3 images
for i in range(0, 9):
sample = (dataset.sample(i));
#sample = np.array(sample);
pyplot.subplot(330 + 1 + i);
pyplot.imshow(sample[0]);
# show the plot
pyplot.show();
def plotNormalizedImageset(self, dataset):
print("plot 9 images...");
        # plot 9 images
#print(samples.shape);
#print(samples[0, 0]);
# create a grid of 3x3 images
for i in range(0, 9):
sample = (dataset.sample(i));
maxValue = np.max(np.array(sample[0]));
            minValue = np.min(np.array(sample[0]));
            # assumed: rescale each image to [0, 1] before plotting
            normalized = (np.array(sample[0]) - minValue) / (maxValue - minValue);
            pyplot.subplot(330 + 1 + i);
            pyplot.imshow(normalized);
        # show the plot
        pyplot.show();
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = '/home/marc/ROLO/3rd\ party_upgrade/weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.08
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
w_img, h_img = [352, 240]
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_heatmap = 1024
def __init__(self, argvs=[]):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self, argvs):
for i in range(1, len(argvs), 2):
if argvs[i] == '-fromfile': self.fromfile = argvs[i + 1]
if argvs[i] == '-tofile_img': self.tofile_img = argvs[i + 1]; self.filewrite_img = True
if argvs[i] == '-tofile_txt': self.tofile_txt = argvs[i + 1]; self.filewrite_txt = True
if argvs[i] == '-imshow':
if argvs[i + 1] == '1':
self.imshow = True
else:
self.imshow = False
if argvs[i] == '-disp_console':
if argvs[i + 1] == '1':
self.disp_console = True
else:
self.disp_console = False
def build_networks(self):
if self.disp_console: print("Building YOLO_small graph...")
self.x = tf.placeholder('float32', [None, 448, 448, 3])
self.conv_1 = self.conv_layer(1, self.x, 64, 7, 2)
self.pool_2 = self.pooling_layer(2, self.conv_1, 2, 2)
self.conv_3 = self.conv_layer(3, self.pool_2, 192, 3, 1)
self.pool_4 = self.pooling_layer(4, self.conv_3, 2, 2)
self.conv_5 = self.conv_layer(5, self.pool_4, 128, 1, 1)
self.conv_6 = self.conv_layer(6, self.conv_5, 256, 3, 1)
self.conv_7 = self.conv_layer(7, self.conv_6, 256, 1, 1)
self.conv_8 = self.conv_layer(8, self.conv_7, 512, 3, 1)
self.pool_9 = self.pooling_layer(9, self.conv_8, 2, 2)
self.conv_10 = self.conv_layer(10, self.pool_9, 256, 1, 1)
self.conv_11 = self.conv_layer(11, self.conv_10, 512, 3, 1)
self.conv_12 = self.conv_layer(12, self.conv_11, 256, 1, 1)
self.conv_13 = self.conv_layer(13, self.conv_12, 512, 3, 1)
self.conv_14 = self.conv_layer(14, self.conv_13, 256, 1, 1)
self.conv_15 = self.conv_layer(15, self.conv_14, 512, 3, 1)
self.conv_16 = self.conv_layer(16, self.conv_15, 256, 1, 1)
self.conv_17 = self.conv_layer(17, self.conv_16, 512, 3, 1)
self.conv_18 = self.conv_layer(18, self.conv_17, 512, 1, 1)
self.conv_19 = self.conv_layer(19, self.conv_18, 1024, 3, 1)
self.pool_20 = self.pooling_layer(20, self.conv_19, 2, 2)
self.conv_21 = self.conv_layer(21, self.pool_20, 512, 1, 1)
self.conv_22 = self.conv_layer(22, self.conv_21, 1024, 3, 1)
self.conv_23 = self.conv_layer(23, self.conv_22, 512, 1, 1)
self.conv_24 = self.conv_layer(24, self.conv_23, 1024, 3, 1)
self.conv_25 = self.conv_layer(25, self.conv_24, 1024, 3, 1)
self.conv_26 = self.conv_layer(26, self.conv_25, 1024, 3, 2)
self.conv_27 = self.conv_layer(27, self.conv_26, 1024, 3, 1)
self.conv_28 = self.conv_layer(28, self.conv_27, 1024, 3, 1)
self.fc_29 = self.fc_layer(29, self.conv_28, 512, flat=True, linear=False)
self.fc_30 = self.fc_layer(30, self.fc_29, 4096, flat=False, linear=False)
# skip dropout_31
self.fc_32 = self.fc_layer(32, self.fc_30, 1470, flat=False, linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.weights_file)
if self.disp_console: print("Loading complete!" + '\n')
def conv_layer(self, idx, inputs, filters, size, stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size, size, int(channels), filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size // 2
pad_mat = np.array([[0, 0], [pad_size, pad_size], [pad_size, pad_size], [0, 0]])
inputs_pad = tf.pad(inputs, pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',
name=str(idx) + '_conv')
conv_biased = tf.add(conv, biases, name=str(idx) + '_conv_biased')
if self.disp_console: print(
' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (
idx, size, size, stride, filters, int(channels)))
return tf.maximum(self.alpha * conv_biased, conv_biased, name=str(idx) + '_leaky_relu')
def pooling_layer(self, idx, inputs, size, stride):
if self.disp_console: print(
' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx, size, size, stride))
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding='SAME',
name=str(idx) + '_pool')
def fc_layer(self, idx, inputs, hiddens, flat=False, linear=False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1] * input_shape[2] * input_shape[3]
inputs_transposed = tf.transpose(inputs, (0, 3, 1, 2))
inputs_processed = tf.reshape(inputs_transposed, [-1, dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim, hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console: print(
' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (
idx, hiddens, int(dim), int(flat), 1 - int(linear)))
if linear: return tf.add(tf.matmul(inputs_processed, weight), biases, name=str(idx) + '_fc')
ip = tf.add(tf.matmul(inputs_processed, weight), biases)
return tf.maximum(self.alpha * ip, ip, name=str(idx) + '_fc')
def detect_from_cvmat(self, img):
s = time.time()
self.h_img, self.w_img, _ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray(img_RGB)
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
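        # pixel values rescaled from [0, 255] to [-1, 1] before the forward pass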
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img, self.result)
strtime = str(time.time() - s)
if self.disp_console: print('Elapsed time : ' + strtime + ' secs' + '\n')
def detect_from_file(self, filename):
if self.disp_console: print('Detect from ' + filename)
img = cv2.imread(filename)
# img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt', 'r').readlines(), dtype='float32')
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0, y, x, c] = f[c * 448 * 448 + y * 448 + x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32, feed_dict=in_dict)
        self.result = self.interpret_output(net_output[0])
        img = cv2.imread('person.jpg')
        self.show_results(img, self.result)
def interpret_output(self, output):
probs = np.zeros((7, 7, 2, 20))
class_probs = np.reshape(output[0:980], (7, 7, 20))
scales = np.reshape(output[980:1078], (7, 7, 2))
boxes = np.reshape(output[1078:], (7, 7, 2, 4))
offset = np.transpose(np.reshape(np.array([np.arange(7)] * 14), (2, 7, 7)), (1, 2, 0))
boxes[:, :, :, 0] += offset
boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
boxes[:, :, :, 0:2] = boxes[:, :, :, 0:2] / 7.0
boxes[:, :, :, 2] = np.multiply(boxes[:, :, :, 2], boxes[:, :, :, 2])
boxes[:, :, :, 3] = np.multiply(boxes[:, :, :, 3], boxes[:, :, :, 3])
boxes[:, :, :, 0] *= self.w_img
boxes[:, :, :, 1] *= self.h_img
boxes[:, :, :, 2] *= self.w_img
boxes[:, :, :, 3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])
filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs, axis=3)[
filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0: continue
for j in range(i + 1, len(boxes_filtered)):
if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]], boxes_filtered[i][0], boxes_filtered[i][1],
boxes_filtered[i][2], boxes_filtered[i][3], probs_filtered[i]])
return result
def show_results(self, img, results):
img_cp = img.copy()
if self.filewrite_txt:
ftxt = open(self.tofile_txt, 'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3]) // 2
h = int(results[i][4]) // 2
if self.disp_console: print(
' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(
int(results[i][3])) + ',' + str(int(results[i][4])) + '], Confidence = ' + str(results[i][5]))
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
if self.filewrite_txt:
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h) + ',' + str(
results[i][5]) + '\n')
if self.filewrite_img:
if self.disp_console: print(' image file writed : ' + self.tofile_img)
cv2.imwrite(self.tofile_img, img_cp)
if self.imshow:
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(0)
if self.filewrite_txt:
if self.disp_console: print(' txt file writed : ' + self.tofile_txt)
ftxt.close()
def iou(self, box1, box2):
tb = min(box1[0] + 0.5 * box1[2], box2[0] + 0.5 * box2[2]) - max(box1[0] - 0.5 * box1[2],
box2[0] - 0.5 * box2[2])
lr = min(box1[1] + 0.5 * box1[3], box2[1] + 0.5 * box2[3]) - max(box1[1] - 0.5 * box1[3],
box2[1] - 0.5 * box2[3])
if tb < 0 or lr < 0:
intersection = 0
else:
intersection = tb * lr
return intersection / (box1[2] * box1[3] + box2[2] * box2[3] - intersection)
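    # Worked example (sketch): boxes here are (x_center, y_center, w, h); two
    # 2x2 boxes centered at (0, 0) and (1, 1) overlap in a 1x1 patch, so
    # iou = 1 / (4 + 4 - 1) = 1/7.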
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
def debug_location(self, img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3]) // 2
h = int(location[4]) // 2
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(1)
def debug_locations(self, img, locations):
img_cp = img.copy()
for location in locations:
x = int(location[1])
y = int(location[2])
w = int(location[3]) // 2
h = int(location[4]) // 2
cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5], (x - w + 5, y - h - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('YOLO_small detection', img_cp)
cv2.waitKey(1)
def debug_gt_location(self, img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('gt', img_cp)
cv2.waitKey(1)
def file_to_img(self, filepath):
img = cv2.imread(filepath)
return img
def file_to_video(self, filepath):
        try:
            video = cv2.VideoCapture(filepath)
        except IOError:
            print('cannot open video file: ' + filepath)
            video = None
        return video
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
        batch_size = len(pred_locs)
        assert (len(gts) == batch_size)
        print("batch_size: ", batch_size)
        ious = []
        for i in range(batch_size):
            pred_loc = pred_locs[i]
            gt = gts[i]
            iou_ = self.iou(pred_loc, gt)
            ious.append(iou_)
return ious
def load_folder(self, path):
paths = [os.path.join(path, fn) for fn in next(os.walk(path))[2]]
# return paths
return sorted(paths)
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') # '\r\n'
return lines
def find_gt_location(self, lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') # for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised = [x1 + w / 2, y1 + h / 2, w, h]
max_ious = 0
for id, location in enumerate(locations):
location_revised = location[1:5]
print("location: ", location_revised)
print("gt_location: ", gt_location_revised)
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
print("Max IOU: " + str(max_ious))
if max_ious != 0:
best_location = locations[index]
class_index = self.classes.index(best_location[0])
best_location[0] = class_index
return best_location
else: # it means the detection failed, no intersection with the ground truth
return [0, 0, 0, 0, 0, 0]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext = os.path.splitext(filename)[0]
output_name = name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, yolo_output)
def location_from_0_to_1(self, wid, ht, location):
location[1] /= wid
location[2] /= ht
location[3] /= wid
location[4] /= ht
return location
def gt_location_from_0_to_1(self, wid, ht, location):
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
def locations_normal(self, wid, ht, locations):
wid *= 1.0
ht *= 1.0
locations[1] *= wid
locations[2] *= ht
locations[3] *= wid
locations[4] *= ht
return locations
def cal_yolo_loss(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2] / 2
location[1] = location[1] - location[3] / 2
loss = sum([(location[i] - gt_location[i]) ** 2 for i in range(4)]) * 100 / 4
return loss
def cal_yolo_IOU(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2] / 2
location[1] = location[1] - location[3] / 2
loss = self.iou(location, gt_location)
return loss
def prepare_training_data(self, img_fold, gt_file,
out_fold): # [or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO, taking the fc_30 activations as features
        and the fc_32 output for locations.
        Save the features and locations into file for training LSTM'''
# Reshape the input image
paths = self.load_folder(img_fold)
gt_locations = self.load_dataset_gt(gt_file)
avg_loss = 0
total = 0
total_time = 0
for id, path in enumerate(paths):
filename = os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img, self.w_img, _ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray(img_RGB)
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
in_dict = {self.x: inputs}
            start_time = time.time()
            # fetch features and detections in a single run so the conv layers execute only once
            feature, output = self.sess.run([self.fc_30, self.fc_32], feed_dict=in_dict)
            cycle_time = time.time() - start_time
            print('cycle time= ', cycle_time)
            total_time += cycle_time
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations,
gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss = self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output = np.concatenate(
(np.reshape(feature, [-1, self.num_feat]),
np.reshape(location, [-1, self.num_predict])),
axis=1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss / total
print("YOLO avg_loss: ", avg_loss)
print("Time Spent on Tracking: " + str(total_time))
print("fps: " + str(id / total_time))
return
def loc_to_coordinates(self, loc):
loc = [i * 32 for i in loc]
x1 = int(loc[0] - loc[2] / 2)
y1 = int(loc[1] - loc[3] / 2)
x2 = int(loc[0] + loc[2] / 2)
y2 = int(loc[1] + loc[3] / 2)
return [x1, y1, x2, y2]
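    # Example: loc_to_coordinates([7, 7, 2, 2]) scales by 32 to [224, 224, 64, 64]
    # and returns the corner box [192, 192, 256, 256].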
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
print(coord)
[classnum, x1, y1, x2, y2, prob] = coord
[x1, y1, x2, y2] = self.loc_to_coordinates([x1, y1, x2, y2])
for y in range(y1, y2):
for x in range(x1, x2):
index = y * 32 + x
heatmap_vec[index] = 1.0
return heatmap_vec
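    # The 1024-length vector is a flattened 32x32 occupancy grid; for inspection it
    # can be viewed as an image, e.g. heatmap = heatmap_vec.reshape(32, 32).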
def prepare_training_data_heatmap(self, img_fold, gt_file,
out_fold): # [or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO; take the fc_30 layer as features and the fc_32 layer as locations.
        Save the features and heatmap targets into file for training the LSTM. '''
# Reshape the input image
paths = self.load_folder(img_fold)
gt_locations = self.load_dataset_gt(gt_file)
avg_loss = 0
total = 0
for id, path in enumerate(paths):
filename = os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img, self.w_img, _ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray(img_RGB)
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
in_dict = {self.x: inputs}
            feature, output = self.sess.run([self.fc_30, self.fc_32], feed_dict=in_dict)  # one run: conv layers execute once
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations,
gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss = self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
heatmap_vec = self.coordinates_to_heatmap_vec(location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output = np.concatenate(
(np.reshape(feature, [-1, self.num_feat]),
np.reshape(heatmap_vec, [-1, self.num_heatmap])),
axis=1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss / total
print("YOLO avg_loss: ", avg_loss)
return
def prepare_training_data_multiTarget(self, img_fold, out_fold):
        ''' Pass the data through YOLO; take the fc_30 layer as features and the fc_32 layer as locations.
        Save the features and locations into file for training the LSTM. '''
# Reshape the input image
print(img_fold)
paths = self.load_folder(img_fold)
avg_loss = 0
total = 0
for id, path in enumerate(paths):
filename = os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img, self.w_img, _ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray(img_RGB)
inputs = np.zeros((1, 448, 448, 3), dtype='float32')
inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
in_dict = {self.x: inputs}
            feature, output = self.sess.run([self.fc_30, self.fc_32], feed_dict=in_dict)  # one run: conv layers execute once
locations = self.interpret_output(output[0])
self.debug_locations(img, locations)
# change location into [0, 1]
for i in range(0, len(locations)):
class_index = self.classes.index(locations[i][0])
locations[i][0] = class_index
locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
if len(locations) == 1:
print('len(locations)= 1\n')
                yolo_output = [[np.reshape(feature, [-1, self.num_feat])],
                               [np.reshape(locations, [-1, self.num_predict])]]
                self.save_yolo_output(out_fold, yolo_output, filename)  # completion sketch: save single-target frames
        return
#!/usr/bin/python3
#--- coding:utf-8
import time, sys, os
from IPython import embed
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class Kmeans():
def __init__(self, n_clusters, max_iter = 1000, tol = 0.00001):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
def fit(self, data):
shape,_ = data.shape
index = np.random.randint(0,shape,size=self.n_clusters)
k_points = data[index]
k_points_last = None
for a in range(self.max_iter):
label = []
k_points_last = k_points.copy()
for i in range(shape):
dis = []
for j in range(self.n_clusters):
dis.append(np.linalg.norm(data[i,:]-k_points[j,:]))
label.append(dis.index(min(dis)))
for i in range(self.n_clusters):
index = np.argwhere(np.array(label)==i)
if len(index) != 0: k_points[i,:] = data[index, :].mean(axis=0)
if np.linalg.norm(k_points-k_points_last) < self.tol:
break
return np.array(label)
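# Example usage of Kmeans above (synthetic blobs, made up for this sketch):
#   data = np.vstack((np.random.randn(50, 2), np.random.randn(50, 2) + 5.0))
#   labels = Kmeans(n_clusters=2).fit(data)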
class SOM():
def __init__(self, n_clusters, in_layer, s, knn=10, out_size=(3,3), m_iter=1000):
self.n_clusters = n_clusters
self.in_layer = in_layer.copy()
self.m_iter = m_iter
self.knn = knn
a,b = np.min(self.in_layer), np.max(self.in_layer)
self.w = (b-a)*np.random.rand(out_size[0], out_size[1], self.in_layer.shape[1])+a
self.color = ['y', 'r', 'g', 'b', 'c', 'm', 'k', 'pink', 'dark', 'orange', 'tan', 'gold']
self.label = np.zeros(len(in_layer))
self.res =None
self.neuron = {}
self.l = 1.0
self.s = s
self.som_r = int(self.w.shape[0]/2.5)
self.som_r_square = self.som_r**2
self.D_list = []
def Init_W(self):
for i in range(self.w.shape[0]):
for j in range(self.w.shape[1]):
k = np.random.randint(self.in_layer.shape[0])
self.w[i][j]=self.in_layer[k]
def Normalize_Input(self, X):
'''
for i in range(X.shape[0]):
t = np.linalg.norm(X[i])
X[i] /= t
#'''
return X
def Normalize_W(self, w):
'''
for i in range(w.shape[0]):
for j in range(w.shape[1]):
t = np.linalg.norm(w[i,j])
w[i,j] /= t
#'''
return w
def Get_Win_Neuron(self, x):
max_dis=float('inf')
min_dis=-float('inf')
for i in range(self.w.shape[0]):
for j in range(self.w.shape[1]):
'''
                dis = x.dot(self.w[i,j]) # cosine distance
if dis > min_dis:
min_dis = dis
win_index = (i,j,dis)
'''
                dis = np.linalg.norm(x-self.w[i,j])  # Euclidean distance
if dis < max_dis:
max_dis = dis
win_index = (i,j,dis)
#'''
return win_index
def Get_Neighborhood(self, win, radius):
res = []
for i in range(max(0, win[0]-radius), min(self.w.shape[0], win[0]+radius+1)):
for j in range(max(0, win[1]-radius), min(self.w.shape[1], win[1]+radius+1)):
dis = (i-win[0])**2 + (j-win[1])**2
if dis <= self.som_r_square: res.append([i, j, dis])
return res
def Update_W(self, index, X, r):
for i in range(len(index)):
self.w[index[i][0],index[i][1]] += self.l*self.h(index[i][2],r)*(X-self.w[index[i][0],index[i][1]])
def Radius(self, t):
return (self.som_r*(4.5/(t+3)))**2
def alpha(self, t):
if self.l <= 0.01: return 0.01
else: return 1./(2*t+1)
def h(self, dis, r_square):
return np.exp(-float(dis)/(2*r_square))
def Get_Result(self):
self.w = self.Normalize_W(self.w)
self.neuron={}
for i in range(self.in_layer.shape[0]):
win = self.Get_Win_Neuron(self.in_layer[i])
key = win[0]*self.w.shape[0] + win[1]
            if key in self.neuron.keys():
                self.neuron[key].append(i)
            else:
                self.neuron[key] = [i]  # dict.fromkeys returned a new dict and was a no-op here
def Train(self, fpath):
self.in_layer = self.Normalize_Input(self.in_layer)
for i in range(self.m_iter):
p = int(46*i/self.m_iter)+1
print(' P|'+'*'*p+' '*(46-p)+'| '+str(i)+'/'+str(self.m_iter), end='\r')
self.w = self.Normalize_W(self.w)
r = self.Radius(i)
self.l = self.alpha(i)
D = 0
for k in range(self.in_layer.shape[0]):
j = np.random.randint(self.in_layer.shape[0])
win = self.Get_Win_Neuron(self.in_layer[j])
index = self.Get_Neighborhood(win, self.som_r)
self.Update_W(index, self.in_layer[j], r)
D += win[2]
self.D_list.append(D)
self.Get_Result()
print(' P|'+'*'*46+'| '+str(self.m_iter)+'/'+str(self.m_iter))
return self.w.reshape(self.w.shape[0]*self.w.shape[1], self.w.shape[2])[list(self.neuron.keys())], self.neuron
def Preprocess(self):
#filter outliers
dev=[]
key_l = list(self.neuron.keys())
for key in key_l:
a, b=int(key/self.w.shape[0]), key%self.w.shape[0]
mean_x = self.in_layer[self.neuron[key]].mean(axis=0)
d=np.sum((self.w[a][b]-mean_x)**2)
dev.append(d)
mean_dev, std_dev=np.mean(dev), np.std(dev)
for i in range(len(dev)):
if dev[i] > mean_dev+std_dev:
del self.neuron[key_l[i]]
#embed(header='First time')
#filter outliers and noises
dev=[]
key_l = list(self.neuron.keys())
for key in key_l:
for v in self.neuron[key]:
a, b=int(key/self.w.shape[0]), key%self.w.shape[0]
d=np.sum((self.in_layer[v]-self.w[a][b])**2)
dev.append(d)
mean_dev, std_dev=np.mean(dev), np.std(dev)
        cnt=0
        for key in key_l:
            for v in list(self.neuron[key]):  # iterate over a copy: removing while iterating skips items
                if dev[cnt] > mean_dev+std_dev:
                    self.neuron[key].remove(v)
                cnt+=1
            if not self.neuron[key]:  # drop neurons left with no samples
                del self.neuron[key]
#filter noises
dev=[]
key_l = list(self.neuron.keys())
for key in key_l:
a,b = int(key/self.w.shape[0]),key%self.w.shape[0]
temp=[]
radius=1
while len(temp)==0:
for i in range(max(0, a-radius), min(self.w.shape[0], a+radius+1)):
for j in range(max(0, b-radius), min(self.w.shape[1], b+radius+1)):
#if i*self.w.shape[0]+j in key_l:
temp.append(np.sum((self.w[a][b]-self.w[i][j])**2))
radius+=1
dev.append(np.mean(temp))
mean_dev,std_dev = np.mean(dev), np.std(dev)
for i in range(len(dev)):
if dev[i]>mean_dev+std_dev*3:
del self.neuron[key_l[i]]
t=np.arange(len(dev))
dev.sort()
plt.plot(t, np.array(dev))
plt.axhline(mean_dev, color='r')
plt.axhline(mean_dev+std_dev, color='g')
plt.axhline(mean_dev+3*std_dev, color='b')
plt.savefig('dev')
#embed()
'''
dev=[]
key_l = list(self.neuron.keys())
for key in key_l:
dev.append(len(self.neuron[key]))
mean_dev, std_dev=np.mean(dev), np.std(dev)
for key in key_l:
if len(self.neuron[key])<mean_dev-std_dev:
del self.neuron[key]
'''
def Get_Dis(self, X):
S = np.zeros((len(X), len(X)))
for i in range(len(X)):
a,b=int(X[i]/self.w.shape[0]), X[i]%self.w.shape[0]
for j in range(i+1, len(X)):
c,d=int(X[j]/self.w.shape[0]), X[j]%self.w.shape[0]
#a,b=int(X[i]/self.w.shape[0])-int(X[j]/self.w.shape[0]), X[i]%self.w.shape[0]-X[j]%self.w.shape[0]
#if abs(a-c)<self.knn and abs(b-d)<self.knn:
if (a-c)**2+(b-d)**2<self.knn**2:
S[i][j] = 1
S[j][i] = S[i][j]
return S
    def Get_W_KNN(self, X, S):
        W = np.zeros((len(X), len(X)))
        for i in range(len(X)):
            a,b = int(X[i]/self.w.shape[0]), X[i]%self.w.shape[0]
            # squared weight-space distance from neuron i to every other neuron
            dis = np.zeros(len(X))
            for j in range(len(X)):
                c,d = int(X[j]/self.w.shape[0]), X[j]%self.w.shape[0]
                dis[j] = np.sum((self.w[a][b]-self.w[c][d])**2)
            index = np.argwhere(S[i]>0).flatten()
            if len(index) < self.knn:
                index = np.argpartition(dis, self.knn)[:self.knn]
            # Gaussian affinity over the selected neighbours; self.s is the kernel width from __init__
            W[i,index] = np.exp(-dis[index]/self.s)
        return W
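# The affinity matrix from Get_W_KNN can feed a spectral-clustering pass over the
# surviving SOM prototypes; a hedged sketch (scikit-learn assumed available):
#   from sklearn.cluster import spectral_clustering
#   labels = spectral_clustering(W, n_clusters=som.n_clusters)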
#%%
import cobra
import optlang_enumerator.cobra_cnapy
import optlang
import optlang_enumerator.mcs_computation as mcs_computation
import numpy
import pickle
from pathlib import Path
results_cache_dir = None # do not cache preprocessing results
# results_cache_dir = Path(r"E:\cnapy_tmp\results_cache") # cache preprocessing results in the given directory
#%%
from importlib import reload
import optlang_enumerator
reload(optlang_enumerator)
import optlang_enumerator.mcs_computation as mcs_computation
#%%
ecc2 = optlang_enumerator.cobra_cnapy.CNApyModel.read_sbml_model("ECC2comp.sbml")
# allow all reactions that are not boundary reactions as cuts (same as exclude_boundary_reactions_as_cuts option of compute_mcs)
cuts = numpy.array([not r.boundary for r in ecc2.reactions])
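#%%
# Next step sketch: enumerate MCS with the cuts defined above. The exact signature
# of compute_mcs (and the `targets` specification) varies between
# optlang_enumerator versions, so treat this as illustrative only:
#   mcs = mcs_computation.compute_mcs(ecc2, targets, cuts=cuts, max_mcs_size=3)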
import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.LensModel.QuadOptimizer.param_manager import PowerLawFixedShear, \
PowerLawFixedShearMultipole, PowerLawFreeShear, PowerLawFreeShearMultipole
class TestParamClasses(object):
def setup(self):
self.zlens, self.zsource = 0.5, 1.5
epl_kwargs = {'theta_E': 1., 'center_x': 0., 'center_y': 0., 'e1': 0.2, 'e2': 0.1, 'gamma': 2.05}
shear_kwargs = {'gamma1': 0.05, 'gamma2': -0.04}
kwargs_macro = [epl_kwargs, shear_kwargs]
self.x_image = np.array([0.65043538, -0.31109505, 0.78906059, -0.86222271])
self.y_image = np.array([-0.89067493, 0.94851787, 0.52882605, -0.25403778])
halo_list = ['SIS', 'SIS', 'SIS']
halo_z = [self.zlens - 0.1, self.zlens, self.zlens + 0.4]
halo_kwargs = [{'theta_E': 0.1, 'center_x': 0.3, 'center_y': -0.9},
{'theta_E': 0.15, 'center_x': 1.3, 'center_y': -0.5},
{'theta_E': 0.06, 'center_x': -0.4, 'center_y': -0.4}]
self.kwargs_epl = kwargs_macro + halo_kwargs
self.zlist_epl = [self.zlens, self.zlens] + halo_z
self.lens_model_list_epl = ['EPL', 'SHEAR'] + halo_list
kwargs_multi = [{'m': 4, 'a_m': -0.04, 'phi_m': -0.2, 'center_x': 0.1, 'center_y': -0.1}]
self.kwargs_multipole = kwargs_macro + kwargs_multi + halo_kwargs
self.zlist_multipole = [self.zlens, self.zlens, self.zlens] + halo_z
self.lens_model_list_multipole = ['EPL', 'SHEAR'] + ['MULTIPOLE'] + halo_list
def test_param_penalty(self):
param_class = PowerLawFreeShear(self.kwargs_epl)
args = param_class.kwargs_to_args(self.kwargs_epl)
param_penalty = param_class.param_chi_square_penalty(args)
npt.assert_almost_equal(0, param_penalty)
def test_plaw_free_shear(self):
param_class = PowerLawFreeShear(self.kwargs_epl)
npt.assert_(param_class.to_vary_index==2)
kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05},
{'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}]
args_epl = param_class.kwargs_to_args(kwargs_in)
npt.assert_almost_equal(args_epl, [1, 0, 0.3, 0.25, 0.1, 0.05, -0.01])
kwargs_out = param_class.args_to_kwargs(args_epl)
npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05)
for key in kwargs_out[-1].keys():
npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_epl[-1][key])
def test_plaw_fixed_shear(self):
param_class = PowerLawFixedShear(self.kwargs_epl, 0.12)
npt.assert_(param_class.to_vary_index == 2)
kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05},
{'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}]
args_epl = param_class.kwargs_to_args(kwargs_in)
npt.assert_almost_equal(args_epl[0:5], [1, 0, 0.3, 0.25, 0.1])
kwargs_out = param_class.args_to_kwargs(args_epl)
npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05)
npt.assert_almost_equal(kwargs_out[1]['gamma1'] ** 2 + kwargs_out[1]['gamma2']**2, 0.12 ** 2)
for key in kwargs_out[-1].keys():
npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_epl[-1][key])
def test_plawboxydisky_fixed_shear(self):
param_class = PowerLawFixedShearMultipole(self.kwargs_multipole, 0.12)
npt.assert_(param_class.to_vary_index == 3)
kwargs_in = [{'theta_E': 1., 'center_x': 0., 'center_y': 0.3, 'e1': 0.25, 'e2': 0.1, 'gamma': 2.05},
{'gamma1': 0.05, 'gamma2': -0.01}, {'theta_E': -0.3, 'center_x': 0., 'center_y': 0.04}]
args_epl = param_class.kwargs_to_args(kwargs_in)
npt.assert_almost_equal(args_epl[0:5], [1, 0, 0.3, 0.25, 0.1])
kwargs_out = param_class.args_to_kwargs(args_epl)
npt.assert_almost_equal(kwargs_out[0]['gamma'], 2.05)
        npt.assert_almost_equal(kwargs_out[1]['gamma1'] ** 2 + kwargs_out[1]['gamma2']**2, 0.12 ** 2)
        for key in kwargs_out[-1].keys():
            npt.assert_almost_equal(kwargs_out[-1][key], self.kwargs_multipole[-1][key])
# CODING-STYLE CHECKS:
# pycodestyle test_decorators.py
import os
import sys
import pytest
import importlib
import numpy as np
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import taxcalc
from taxcalc.decorators import *
def test_create_apply_function_string():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], [])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3[i],x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_apply_function_string_with_params():
ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], ['d'])
exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
" for i in range(len(x_0)):\n"
" x_0[i],x_1[i],x_2[i] = jitted_f(x_3,x_4[i])\n"
" return x_0,x_1,x_2\n")
assert ans == exp
def test_create_toplevel_function_string_mult_outputs():
ans = create_toplevel_function_string(['a', 'b'], ['d', 'e'],
['pm', 'pm', 'pf', 'pm'])
    exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" import pandas as pd\n"
" def get_values(x):\n"
" if isinstance(x, pd.Series):\n"
" return x.values\n"
" else:\n"
" return x\n"
" outputs = \\\n"
" (pm.a, pm.b) = \\\n"
" applied_f(get_values(pm.a), get_values(pm.b), "
"get_values(pf.d), get_values(pm.e), )\n"
" header = ['a', 'b']\n"
" return DataFrame(data=np.column_stack(outputs),"
"columns=header)")
assert ans == exp
def test_create_toplevel_function_string():
ans = create_toplevel_function_string(['a'], ['d', 'e'],
['pm', 'pf', 'pm'])
    exp = ("def hl_func(pm, pf):\n"
" from pandas import DataFrame\n"
" import numpy as np\n"
" import pandas as pd\n"
" def get_values(x):\n"
" if isinstance(x, pd.Series):\n"
" return x.values\n"
" else:\n"
" return x\n"
" outputs = \\\n"
" (pm.a) = \\\n"
" applied_f(get_values(pm.a), get_values(pf.d), "
"get_values(pm.e), )\n"
" header = ['a']\n"
" return DataFrame(data=outputs,"
"columns=header)")
assert ans == exp
def some_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def test_make_apply_function():
ans_do_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
[], do_jit=True, no_python=True)
assert ans_do_jit
ans_no_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
[], do_jit=False, no_python=True)
assert ans_no_jit
@apply_jit(["a", "b"], ["x", "y", "z"], nopython=True)
def Magic_calc(x, y, z):
a = x + y
b = x + y + z
return (a, b)
def Magic(pm, pf):
# Adjustments
outputs = pf.a, pf.b = Magic_calc(pm, pf)
header = ['a', 'b']
return DataFrame(data=np.column_stack(outputs), columns=header)
@iterate_jit(nopython=True)
def Magic_calc2(x, y, z):
a = x + y
b = x + y + z
return (a, b)
class Foo(object):
pass
@iterate_jit(nopython=True)
def faux_function(MARS):
if MARS == 1:
var = 2
else:
var = 1
return var
@iterate_jit(nopython=True)
def ret_everything(a, b, c, d, e, f):
c = a + b
d = a + b
e = a + b
f = a + b
return (c, d, e,
f)
def test_magic_apply_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_magic_apply_jit_swap():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic(pf, pm)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_magic_iterate_jit():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
xx = Magic_calc2(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
assert_frame_equal(xx, exp)
def test_faux_function_iterate_jit():
pm = Foo()
pf = Foo()
pf.MARS = np.ones((5,))
pf.var = np.ones((5,))
ans = faux_function(pm, pf)
exp = DataFrame(data=[2.0] * 5, columns=['var'])
assert_frame_equal(ans, exp)
def test_ret_everything_iterate_jit():
pm = Foo()
pf = Foo()
pf.a = np.ones((5,))
pf.b = np.ones((5,))
pf.c = np.ones((5,))
pf.d = np.ones((5,))
pf.e = np.ones((5,))
pf.f = np.ones((5,))
ans = ret_everything(pm, pf)
exp = DataFrame(data=[[2.0, 2.0, 2.0, 2.0]] * 5,
columns=["c", "d", "e", "f"])
assert_frame_equal(ans, exp)
@iterate_jit(nopython=True)
def Magic_calc3(x, y, z):
a = x + y
b = a + z
return (a, b)
def test_function_takes_kwarg():
pm = Foo()
pf = Foo()
pm.a = np.ones((5,))
pm.b = np.ones((5,))
pf.x = np.ones((5,))
pf.y = np.ones((5,))
pf.z = np.ones((5,))
ans = Magic_calc3(pm, pf)
exp = DataFrame(data=[[2.0, 3.0]] * 5,
columns=["a", "b"])
assert_frame_equal(ans, exp)
@iterate_jit(nopython=True)
def Magic_calc4(x, y, z):
a = x + y
b = a + z
return (a, b)
def test_function_no_parameters_listed():
pm = Foo()
pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc4(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
# coding: utf-8
# In[123]:
import numpy as np
class DecisonTree(object):
    def __init__(self,**kwargs):
        '''
        Implemented following CART.
        task: Classification | Regression
        Values after `or` below are the defaults used when the argument is not passed.
        '''
        self.task = kwargs.get('task') or 'Classification'
        self.max_depth = kwargs.get('max_depth') or np.infty
        self.min_samples_split = kwargs.get('min_samples_split') or 2
        self.min_samples_leaf = kwargs.get('min_samples_leaf') or 1
        self.min_impurity_decrease = kwargs.get('min_impurity_decrease') or 0
    def train(self,X_train,y_train):
        '''
        There are two common ways to handle categorical attributes:
        1. Split into one branch per category value (early trees such as C3.0 work this way).
        2. One-hot encode the categorical attribute so only 0-1 variables remain (XGBoost and CART do this).
        3. Strictly, the best categorical split would enumerate every grouping of values; H2O appears to do something like this in practice.
        Continuous attributes are simple: split them with a binary test.
        A (0,1) variable can generally be dropped from the feature set once it has been used for a split; a continuous variable must be kept and may be split again.
        This implementation uses method 2: it no longer distinguishes categorical from continuous features, which keeps the code simple and lets classification and regression share one code path.
        Only binary trees are generated, never multiway trees.
        '''
        '''
        Passing column-reduced sub-datasets into the recursion would lose the feature positions, making it impossible to record where each split happened.
        So the data is simply sliced row-wise instead, which keeps the bookkeeping simple.
        '''
self.X_train = X_train
self.y_train = y_train
tree = self.TreeGenerate(X_train,y_train)
self.Tree = self.pruning(tree)
def predict(self,X_test):
def get_node_value(X):
T = self.Tree
while True:
F = T['splitF']
V = T['splitV']
T = T['left'] if X[F] < V else T['right']
if not isinstance(T,dict):
return T
ret = np.apply_along_axis(get_node_value,1,X_test)
return ret
def TreeGenerate(self,X_train,y_train,depth = 0):
        best_feature,best_value,best_error = self.chooseBestSplitfeature(X_train,y_train,depth) # pick the best split feature and split point
        #print('best_feature,best_value::',best_feature,best_value)
        if best_feature is None: # the node does not meet the split criteria; return the node value directly
return best_value
Tree = {}
Tree['splitF'] = best_feature
Tree['splitV'] = best_value
Tree['best_error'] = best_error
Tree['depth'] = depth
left_X_train,left_y_train,right_X_train,right_y_train = self.binSplit(X_train,y_train,best_feature,best_value)
Tree['left'] = self.TreeGenerate(left_X_train,left_y_train,depth+1)
Tree['right'] = self.TreeGenerate(right_X_train,right_y_train,depth+1)
return Tree
def chooseBestSplitfeature(self,X_train,y_train,depth):
        cls,cnt = np.unique(y_train,return_counts=True)  # count occurrences of each y value
Error_ytrain = self.cal_Error(y_train)
NodeV_ytrain = self.get_NodeValue(y_train)
        if (cls.shape[0]==1) or (depth > self.max_depth) or ((X_train == X_train[0]).all()) or (X_train.shape[0] < self.min_samples_split):
            # stop splitting when: all y values are identical; max depth exceeded;
            # all samples in X are identical; or fewer samples than min_samples_split
            return None,NodeV_ytrain,Error_ytrain
current_error = Error_ytrain
#print('current_error:',current_error)
best_feature = 0
best_value = NodeV_ytrain
best_error = Error_ytrain
for i in range(X_train.shape[1]):
cls = np.unique(X_train[:,i])
if cls.shape[0]==1:
continue
            cls = np.delete(cls, np.argmin(cls))  # drop the smallest value: candidate thresholds are the remaining values
            # completion sketch: standard CART scan over candidate split values
            for value in cls:
                left_X, left_y, right_X, right_y = self.binSplit(X_train, y_train, i, value)
                if (left_y.shape[0] < self.min_samples_leaf) or (right_y.shape[0] < self.min_samples_leaf):
                    continue
                new_error = (left_y.shape[0]*self.cal_Error(left_y) + right_y.shape[0]*self.cal_Error(right_y))/y_train.shape[0]
                if new_error < best_error:
                    best_feature, best_value, best_error = i, value, new_error
        if current_error - best_error < self.min_impurity_decrease:  # improvement too small: stop splitting
            return None, NodeV_ytrain, Error_ytrain
        return best_feature, best_value, best_error
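# Example usage of the class above (synthetic data; binSplit, cal_Error,
# get_NodeValue and pruning must be defined elsewhere in the full file):
#   X = np.random.rand(100, 3); y = (X[:, 0] > 0.5).astype(float)
#   tree = DecisonTree(task='Classification', max_depth=4)
#   tree.train(X, y); preds = tree.predict(X)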
# Brain Tumor Classification
# Enhance tumor region in each image.
# Author: <NAME>
# Copyleft: MIT Licience
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import print_function
import os
import warnings
import numpy as np
import nibabel as nib
from multiprocessing import Pool, cpu_count
from scipy.ndimage.interpolation import zoom
# Ignore the warning caused by SciPy
warnings.simplefilter("ignore", UserWarning)
# Helper function to run in multiple processes
def unwrap_preprocess(arg, **kwarg):
return BTCPreprocess._preprocess(*arg, **kwarg)
class BTCPreprocess(object):
def __init__(self, input_dirs, output_dirs, volume_type="t1ce"):
'''__INIT__
Generates paths for preprocessing.
Variables:
- self.in_paths: a list contains path of each input image.
- self.out_paths: a list provides path for each output image.
- self.mask_paths: a list contains path of mask for each input image.
Inputs:
-------
- input_dirs: a list with two lists, [hgg_input_dir, lgg_input_dir],
path of the directory which saves input images of\
HGG and LGG subjects.
- output_dirs: a list with teo lists, [hgg_output_dir, lgg_output_dir],
path of output directory for every subject in HGG and LGG.
- volume_type: string, type of brain volume, one of "t1ce", "t1", "t2"
or "flair". Default is "t1ce".
'''
self.in_paths, self.out_paths, self.mask_paths = \
self.generate_paths(input_dirs, output_dirs, volume_type)
return
def run(self, is_mask=True, non_mask_coeff=0.333, processes=-1):
'''RUN
Function to map task to multiple processes.
Inputs:
-------
- is_mask: boolearn, if True, enhance tumor region.
Default is True.
- non_mask_coeff: float from 0 to 1, the coefficient of
voxels in non-tumor region. Default is 0.333.
- processes: int, the number of processes used. Default is -1,
which means use all processes.
'''
print("\nPreprocessing on the sample in BraTS dataset.\n")
num = len(self.in_paths)
# Generate parameters
paras = zip([self] * num, self.in_paths, self.out_paths, self.mask_paths,
[is_mask] * num, [non_mask_coeff] * num)
# Set the number of processes
if processes == -1 or processes > cpu_count():
processes = cpu_count()
# Map task
pool = Pool(processes=processes)
pool.map(unwrap_preprocess, paras)
return
def _preprocess(self, in_path, to_path, mask_path,
is_mask=True, non_mask_coeff=0.333):
'''_PREPROCESS
For each input image, four steps are done:
-1- If is_mask, enhance tumor region.
-2- Remove background.
-3- Resize image.
-4- Save image.
Inputs:
-------
- in_path: string, path of input image.
- to_path: string, path of output image.
- mask_path: string, path of the mask of input image.
- is_mask: boolearn, if True, enhance tumor region.
Default is True.
- non_mask_coeff: float from 0 to 1, the coefficient of
voxels in non-tumor region. Default is 0.333.
'''
try:
print("Preprocessing on: " + in_path)
# Load image
volume = self.load_nii(in_path)
if is_mask:
# Enhance tumor region
mask = self.load_nii(mask_path)
volume = self.segment(volume, mask, non_mask_coeff)
# Removce background
volume = self.trim(volume)
# Resize image
volume = self.resize(volume, [112, 112, 96])
# Save image
self.save2nii(to_path, volume)
except RuntimeError:
print("\tFailed to rescal:" + in_path)
return
return
@staticmethod
def generate_paths(in_dirs, out_dirs, volume_type=None):
'''GENERATE_PATHS
Generates three lists with files' paths for prerprocessing.
Inputs:
-------
- input_dirs: a list with two lists, [hgg_input_dir, lgg_input_dir],
path of the directory which saves input images of\
HGG and LGG subjects.
- output_dirs: a list with teo lists, [hgg_output_dir, lgg_output_dir],
path of output directory for every subject in HGG and LGG.
- volume_type: string, type of brain volume, one of "t1ce", "t1", "t2"
or "flair". Default is "t1ce".
Outputs:
--------
- in_paths: a list contains path of each input image.
- out_paths: a list provides path for each output image.
- mask_paths: a list contains path of mask for each input image.
'''
# Function to create new directory
# according to given path
def create_dir(path):
if not os.path.isdir(path):
os.makedirs(path)
return
in_paths, out_paths, mask_paths = [], [], []
for in_dir, out_dir in zip(in_dirs, out_dirs):
            # For HGG or LGG subjects
if not os.path.isdir(in_dir):
print("Input folder {} is not exist.".format(in_dir))
continue
# Create output folder for HGG or LGG subjects
create_dir(out_dir)
for subject in os.listdir(in_dir):
# For each subject in HGG or LGG
subject_dir = os.path.join(in_dir, subject)
subject2dir = os.path.join(out_dir, subject)
# Create folder for output
create_dir(subject2dir)
scan_names = os.listdir(subject_dir)
# Get path of mask file
for scan_name in scan_names:
if "seg" in scan_name:
scan_mask_path = os.path.join(subject_dir, scan_name)
for scan_name in scan_names:
if "seg" in scan_name:
continue
if volume_type is not None:
if volume_type not in scan_name:
continue
# When find the target volume, save its path
# and save paths for its output and mask
in_paths.append(os.path.join(subject_dir, scan_name))
out_paths.append(os.path.join(subject2dir, scan_name))
mask_paths.append(scan_mask_path)
return in_paths, out_paths, mask_paths
@staticmethod
def load_nii(path):
'''LOAD_NII
Load image to numpy ndarray from NIfTi file.
Input:
------
- path: string , path of input image.
Ouput:
------
- A numpy array of input imgae.
'''
return np.rot90(nib.load(path).get_data(), 3)
@staticmethod
def segment(volume, mask, non_mask_coeff=0.333):
'''SEGMENT
Enhance tumor region by suppressing non-tumor region
with a coefficient.
        Inputs:
-------
- volume: numpy ndarray, input image.
- mask: numpy ndarray, mask with segmentation labels.
- non_mask_coeff: float from 0 to 1, the coefficient of
voxels in non-tumor region. Default is 0.333.
Output:
-------
- segged: numpy ndarray, tumor enhanced image.
'''
# Set background to 0
if np.min(volume) != 0:
volume -= np.min(volume)
# Suppress non-tumor region
non_mask_idx = np.where(mask == 0)
segged = np.copy(volume)
segged[non_mask_idx] = segged[non_mask_idx] * non_mask_coeff
return segged
@staticmethod
def trim(volume):
'''TRIM
Remove unnecessary background around brain.
Input:
------
- volume: numpy ndarray, input image.
Output:
-------
- trimmed: numpy ndarray, image without unwanted background.
'''
# Get indices of slices that have brain's voxels
non_zero_slices = [i for i in range(volume.shape[-1])
if np.sum(volume[..., i]) > 0]
# Remove slices that only have background
volume = volume[..., non_zero_slices]
# In each slice, find the minimum area of brain
# Coordinates of area are saved
row_begins, row_ends = [], []
col_begins, col_ends = [], []
        for i in range(volume.shape[-1]):
            non_zero_pixels = np.where(volume[..., i] > 0)  # per-slice, as the docstring describes
row_begins.append(np.min(non_zero_pixels[0]))
row_ends.append(np.max(non_zero_pixels[0]))
col_begins.append(np.min(non_zero_pixels[1]))
col_ends.append(np.max(non_zero_pixels[1]))
# Find the maximum area from all minimum areas
row_begin, row_end = min(row_begins), max(row_ends)
col_begin, col_end = min(col_begins), max(col_ends)
# Generate a minimum square area taht includs the maximum area
rows_num = row_end - row_begin
cols_num = col_end - col_begin
more_col_len = rows_num - cols_num
more_col_len_left = more_col_len // 2
more_col_len_right = more_col_len - more_col_len_left
col_begin -= more_col_len_left
col_end += more_col_len_right
len_of_side = rows_num + 1
# Remove unwanted background
        trimmed = np.zeros([len_of_side, len_of_side, volume.shape[-1]])
        # completion sketch: copy the square region into the buffer and return it
        trimmed[:, :, :] = volume[row_begin:row_end + 1, col_begin:col_end + 1, :]
        return trimmed
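    # `resize` and `save2nii` are called in _preprocess but fall outside this excerpt;
    # minimal sketches (the identity affine in save2nii is an assumption):
    @staticmethod
    def resize(volume, target_shape):
        # per-axis zoom factors to reach the target shape
        factors = [t / s for t, s in zip(target_shape, volume.shape)]
        return zoom(volume, zoom=factors, order=1)
    @staticmethod
    def save2nii(to_path, volume):
        volume = np.rot90(volume, 1)  # undo the rot90(..., 3) applied in load_nii
        nib.save(nib.Nifti1Image(volume, affine=np.eye(4)), to_path)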
import numpy as np
from pydiva import pydiva2d
import os
import unittest
print("Running tests on Diva data")
print(" ")
class TestDivaData(unittest.TestCase):
@classmethod
def setUp(cls):
# Create lists and arrays
cls.xlist = [1., 2., 10]
cls.ylist = [2., -1., 0.]
cls.datalist = [0., 10, 0]
cls.weightlist = [1., .2, 1.]
cls.xarray = np.array((6., 4., 2.1))
cls.yarray = np.array((1., 10., -1))
cls.yarray2 = np.array((1., 10., -1, 3.))
cls.datarray = np.array((7., 8., 9.))
cls.weightarray = np.array((1., 1., 1.))
cls.nogeojsonfile = "./nodata/data.js"
cls.outputfile = "./datawrite/data.dat"
cls.geojsonfile = "./datawrite/data.js"
def test_init_data(self):
data0 = pydiva2d.Diva2DData()
self.assertIsNone(data0.x)
self.assertIsNone(data0.y)
self.assertIsNone(data0.field)
self.assertIsNone(data0.weight)
data1 = pydiva2d.Diva2DData(self.xlist, self.ylist, self.datalist)
np.testing.assert_array_equal(data1.x, self.xlist)
np.testing.assert_array_equal(data1.y, self.ylist)
np.testing.assert_array_equal(data1.field, self.datalist)
np.testing.assert_array_equal(data1.weight, np.ones_like(data1.field))
# Mix lists and arrays
data2 = pydiva2d.Diva2DData(self.xarray, self.yarray, self.datalist, self.weightlist)
np.testing.assert_array_equal(data2.x, self.xarray)
        np.testing.assert_array_equal(data2.y, self.yarray)
        np.testing.assert_array_equal(data2.field, self.datalist)
        np.testing.assert_array_equal(data2.weight, self.weightlist)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from time import time
from scipy.ndimage import imread
from scipy import misc
from sklearn.preprocessing import LabelBinarizer
from model import Model
# Read in train and test image
TRAIN_DIR = 'data/train/'
TEST_DIR = 'data/test/'
PRED_DIR = 'data/pred/'
SAVE_PATH='checkpoints/convnet_face/face-convnet-2'
def load_data(img_dir, labels=False, one_hot=False, binary_class=False):
img_data = []
data_labels = []
if labels:
# get folder names aka class labels
class_labels = os.listdir(img_dir)
for label in class_labels:
images = os.listdir(img_dir + label)
# gather image array and its class
for img in images:
img_arr = imread(img_dir + label + "/" + img)
img_data.append(img_arr)
data_labels.append(label)
if one_hot:
data_labels_enc = []
# convert categorical labels to one-hot array
enc = LabelBinarizer()
data_labels = enc.fit_transform(data_labels)
if binary_class:
# quick and dirty way of converting binary labels to sparse matrix
for i in data_labels:
if i == 1:
i = [1, 0]
data_labels_enc.append(i)
else:
i = [0, 1]
data_labels_enc.append(i)
    # completion sketch: return labels alongside images when they were collected
    if labels:
        if one_hot and binary_class:
            return np.array(img_data), np.array(data_labels_enc)
        return np.array(img_data), np.array(data_labels)
    return np.array(img_data)
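# Example usage (directory constants defined above):
#   X_train, y_train = load_data(TRAIN_DIR, labels=True, one_hot=True, binary_class=True)
#   X_pred = load_data(PRED_DIR)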
import sys
import os
import numpy as np
import cv2
import scipy
from scipy.stats import norm
from scipy.signal import convolve2d
import math
from PIL import Image
def roi(image):
    im = image
    h,w=im.shape[:2]
    r = cv2.selectROI(im)
    imCrop = im[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
    roi=np.zeros((h,w,3))
    roi[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]=255
    new_image=roi.astype(np.uint8)
    return new_image
def pad_clr(image):
h, w = image.shape[:2]
fr_copy = image[0:1, ::1, ::]
a = fr_copy.shape
lr_copy = image[h - 1:h, ::1, ::]
lc_copy = image[::1, w - 1:w, ::]
fc_copy = image[::1, 0:1, ::]
new_image = np.zeros((image.shape[0] + 2, image.shape[1] + 2, 3))
pad_main = np.copy(image[0:h, 0:w, ::])
l, b = new_image.shape[:2]
new_image[1:h + 1, 1:w + 1, ::] = pad_main
new_image[0:1, 1:w + 1, ::] = lr_copy
    new_image[l - 1:l:1, 1:b - 1, ::] = fr_copy
    new_image[1:l - 1, b - 1:b, ::] = fc_copy
    new_image[1:l - 1, 0:1, ::] = lc_copy
    new_image = np.array(new_image, dtype=np.uint8)
nh, nw = new_image.shape[:2]
return new_image
def pad_bw(image):
h, w = image.shape[:2]
fr_copy = image[0:1, ::1]
lr_copy = image[h - 1:h, ::1]
lc_copy = image[::1, w - 1:w]
fc_copy = image[::1, 0:1]
new_image = np.zeros((image.shape[0] + 2, image.shape[1] + 2))
pad_main = np.copy(image[0:h, 0:w])
l, b = new_image.shape[:2]
new_image[1:h + 1, 1:w + 1] = pad_main
new_image[0:1, 1:w + 1] = lr_copy
new_image[l - 1:l:1, 1:b - 1] = fr_copy
new_image[1:l - 1, b - 1:b] = fc_copy
new_image[1:l - 1, 0:1] = lc_copy
new_image = np.array(new_image, dtype=np.uint8)
nh, nw = new_image.shape[:2]
return new_image
def ComputePyr(ip_img_usr,num_layers):
if len(ip_img_usr.shape) == 2:
count=[]
h, w = ip_img_usr.shape[:2]
fr_copy = ip_img_usr[0:1, ::1]
lr_copy = ip_img_usr[h - 1:h, ::1]
lc_copy = ip_img_usr[::1, w - 1:w]
fc_copy = ip_img_usr[::1, 0:1]
        new_image = np.zeros((h + 2, w + 2))
pad_main = np.copy(ip_img_usr[0:h, 0:w])
l, b = new_image.shape[:2]
new_image[1:h + 1, 1:w + 1] = pad_main
# first row
new_image[0:1, 1:w + 1] = lr_copy
# last row
new_image[l - 1:l:1, 1:b - 1] = fr_copy
# last column
new_image[1:l - 1, b - 1:b] = fc_copy
# first column
new_image[1:l - 1, 0:1] = lc_copy
new_image = np.array(new_image, dtype=np.uint8)
pad_image=new_image.copy()
gpyr=[pad_image]
lpyr=[]
nh, nw = new_image.shape[:2]
for z in range(num_layers):
if nh<4 or nw<4:
break
gauss_kern = np.array([[0.047459, 0.122933, 0.047459], [0.122933, 0.318432, 0.122933], [0.047459, 0.122933, 0.047459]])
            gauss_new_layer = np.zeros((nh - 2, nw - 2))  # valid-convolution output size
for i in range(0, nh - 2):
for j in range(0, nw - 2):
w1 = pad_image[i:i + 3, j:j + 3] * gauss_kern
gauss_new_layer[i, j] = np.sum(w1)
gauss_new_layer = gauss_new_layer.astype("uint8")
            downsample_ip = np.copy(gauss_new_layer)[::2, ::2]  # keep every 2nd row/col
            # completion sketch: a difference-of-Gaussians stands in for the Laplacian level here
            lpyr.append(pad_image[1:nh - 1, 1:nw - 1].astype(np.int16) - gauss_new_layer.astype(np.int16))
            pad_image = pad_bw(downsample_ip)
            gpyr.append(pad_image)
            nh, nw = pad_image.shape[:2]
        return gpyr, lpyr
# This script is used to produce fitting and confidence interval for results in python.
#
#%%
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats.distributions import t
#%%
from numpy import cos, sin, exp, pi, meshgrid
def KentFunc(Xin, theta, phi, psi, kappa, beta, A):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
# rotational scaler ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T # M by 3 finally
coord = SO3(theta, phi, psi)
mu1 = coord[:, 0:1] # col vector
# mu23 = coord[:, 1:3] # 2 col vectors, 3 by 2
mu2 = coord[:, 1:2] # 2 col vectors, 3 by 2
mu3 = coord[:, 2:3] # 2 col vectors, 3 by 2
fval = A * exp(kappa * Z @ mu1 + beta * ((Z @ mu2) ** 2 - (Z @ mu3) ** 2))
return fval[:, 0]
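# Fitting sketch with scipy's curve_fit (Xin: M-by-2 array of angles, resp: measured
# responses; the initial guess is illustrative):
#   p0 = [0.0, 0.0, 0.0, 2.0, 1.0, resp.max()]
#   popt, pcov = curve_fit(KentFunc, Xin, resp, p0=p0, maxfev=10000)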
def KentFunc_bsl(Xin, theta, phi, psi, kappa, beta, A, bsl):
# Assume theta_z, phi_z are column vectors ([0,2 pi]), theta, phi, psi are
# rotational scaler ([0,2 pi])
theta_z, phi_z = Xin[:, 0], Xin[:, 1]
Z = np.array([cos(theta_z) * cos(phi_z), sin(theta_z) * cos(phi_z), sin(phi_z)]).T # M by 3 finally
coord = SO3(theta, phi, psi)
mu1 = coord[:, 0:1] # col vector
# mu23 = coord[:, 1:3] # 2 col vectors, 3 by 2
mu2 = coord[:, 1:2] # 2 col vectors, 3 by 2
mu3 = coord[:, 2:3] # 2 col vectors, 3 by 2
fval = A * exp(kappa * Z @ mu1 + beta * ((Z @ mu2) ** 2 - (Z @ mu3) ** 2)) + bsl
return fval[:, 0]
def SO3(theta, phi, psi):
    # Completion sketch: build an orthonormal frame with columns (mu1, mu2, mu3),
    # consistent with how KentFunc consumes SO3's output above.
    mu1 = np.array([cos(theta) * cos(phi), sin(theta) * cos(phi), sin(phi)])
    e2 = np.array([-sin(theta), cos(theta), 0.0])                              # unit tangent along theta
    e3 = np.array([-cos(theta) * sin(phi), -sin(theta) * sin(phi), cos(phi)])  # unit tangent along phi
    mu2 = cos(psi) * e2 + sin(psi) * e3
    mu3 = -sin(psi) * e2 + cos(psi) * e3
    return np.column_stack((mu1, mu2, mu3))
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
import pytest
from pygbm.plain.splitting import _find_histogram_split
from pygbm.plain.splitting import (SplittingContext, find_node_split,
find_node_split_subtraction,
split_indices)
@pytest.mark.parametrize('n_bins', [3, 32, 256])
def test_histogram_split(n_bins):
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(int(1e4), 2)), dtype=np.uint8)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32)
ordered_hessians = np.ones_like(binned_feature, dtype=np.float32)
all_hessians = ordered_hessians
for true_bin in range(1, n_bins - 1):
for sign in [-1, 1]:
ordered_gradients = np.full_like(binned_feature, sign,
dtype=np.float32)
ordered_gradients[binned_feature <= true_bin] *= -1
all_gradients = ordered_gradients
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned,
n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
split_info, _ = _find_histogram_split(context, feature_idx,
sample_indices)
assert split_info.bin_idx == true_bin
assert split_info.gain >= 0
assert split_info.feature_idx == feature_idx
assert (split_info.n_samples_left + split_info.n_samples_right
== sample_indices.shape[0])
# Constant hessian: 1. per sample.
assert split_info.n_samples_left == split_info.hessian_left
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_split_vs_split_subtraction(constant_hessian):
# Make sure find_node_split and find_node_split_subtraction return the
# same results.
# Should we add a test about computation time to make sure
# time(subtraction) < time(regular)?
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
if constant_hessian:
all_hessians = np.ones(1, dtype=np.float32)
else:
all_hessians = rng.lognormal(size=n_samples).astype(np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
mask = rng.randint(0, 2, n_samples).astype(np.bool)
sample_indices_left = sample_indices[mask]
sample_indices_right = sample_indices[~mask]
# first split parent, left and right with classical method
si_parent, hists_parent = find_node_split(context, sample_indices)
si_left, hists_left = find_node_split(context, sample_indices_left)
si_right, hists_right = find_node_split(context, sample_indices_right)
# split left with subtraction method
si_left_sub, hists_left_sub = find_node_split_subtraction(
context, sample_indices_left, hists_parent, hists_right)
# split right with subtraction method
si_right_sub, hists_right_sub = find_node_split_subtraction(
context, sample_indices_right, hists_parent, hists_left)
# make sure histograms from classical and subtraction method are the same
for hists, hists_sub in ((hists_left, hists_left_sub),
(hists_right, hists_right_sub)):
for hist, hist_sub in zip(hists, hists_sub):
for key in ('count', 'sum_hessians', 'sum_gradients'):
assert_array_almost_equal(hist[key], hist_sub[key], decimal=4)
# make sure split_infos from classical and subtraction method are the same
for si, si_sub in ((si_left, si_left_sub), (si_right, si_right_sub)):
assert_almost_equal(si.gain, si_sub.gain, decimal=3)
assert_almost_equal(si.feature_idx, si_sub.feature_idx, decimal=3)
assert_almost_equal(si.gradient_left, si_sub.gradient_left, decimal=3)
assert_almost_equal(si.gradient_right, si_sub.gradient_right,
decimal=3)
        assert_almost_equal(si.hessian_right, si_sub.hessian_right, decimal=3)
        assert_almost_equal(si.hessian_left, si_sub.hessian_left, decimal=3)
__author__ = 'chrispaulson'
import numpy as np
import scipy
from scipy.optimize import minimize
from .matrixops import matrixops
import copy
from matplotlib import pyplot as plt
import pylab
from mpl_toolkits.mplot3d import axes3d
from pyKriging import samplingplan
import inspyred
from random import Random
from time import time
from inspyred import ec
import math as m
class kriging(matrixops):
def __init__(self, X, y, testfunction=None, name='', testPoints=None, **kwargs):
self.X = copy.deepcopy(X)
self.y = copy.deepcopy(y)
self.testfunction = testfunction
self.name = name
self.n = self.X.shape[0]
self.k = self.X.shape[1]
self.theta = np.ones(self.k)
self.pl = np.ones(self.k) * 2.
self.sigma = 0
self.normRange = []
self.ynormRange = []
self.normalizeData()
self.sp = samplingplan.samplingplan(self.k)
#self.updateData()
#self.updateModel()
self.thetamin = 1e-5
self.thetamax = 100
self.pmin = 1
self.pmax = 2
# Setup functions for tracking history
self.history = {}
self.history['points'] = []
self.history['neglnlike'] = []
self.history['theta'] = []
self.history['p'] = []
self.history['rsquared'] = [0]
self.history['adjrsquared'] = [0]
self.history['chisquared'] = [1000]
self.history['lastPredictedPoints'] = []
self.history['avgMSE'] = []
if testPoints:
self.history['pointData'] = []
self.testPoints = self.sp.rlh(testPoints)
for point in self.testPoints:
testPrimitive = {}
testPrimitive['point'] = point
if self.testfunction:
testPrimitive['actual'] = self.testfunction(point)[0]
else:
testPrimitive['actual'] = None
testPrimitive['predicted'] = []
testPrimitive['mse'] = []
testPrimitive['gradient'] = []
self.history['pointData'].append(testPrimitive)
else:
self.history['pointData'] = None
matrixops.__init__(self)
def normX(self, X):
'''
:param X: An array of points (self.k long) in physical world units
:return X: An array normed to our model range of [0,1] for each dimension
'''
X = copy.deepcopy(X)
if type(X) is np.float64:
# print self.normRange
return np.array( (X - self.normRange[0][0]) / float(self.normRange[0][1] - self.normRange[0][0]) )
else:
for i in range(self.k):
X[i] = (X[i] - self.normRange[i][0]) / float(self.normRange[i][1] - self.normRange[i][0])
return X
def inversenormX(self, X):
'''
:param X: An array of points (self.k long) in normalized model units
:return X : An array of real world units
'''
X = copy.deepcopy(X)
for i in range(self.k):
X[i] = (X[i] * float(self.normRange[i][1] - self.normRange[i][0] )) + self.normRange[i][0]
return X
def normy(self, y):
'''
:param y: An array of observed values in real-world units
:return y: A normalized array of model units in the range of [0,1]
'''
return (y - self.ynormRange[0]) / (self.ynormRange[1] - self.ynormRange[0])
def inversenormy(self, y):
'''
:param y: A normalized array of model units in the range of [0,1]
:return: An array of observed values in real-world units
'''
return (y * (self.ynormRange[1] - self.ynormRange[0])) + self.ynormRange[0]
def normalizeData(self):
'''
This function is called when the initial data in the model is set.
We find the max and min of each dimension and norm that axis to a range of [0,1]
'''
for i in range(self.k):
self.normRange.append([min(self.X[:, i]), max(self.X[:, i])])
# print self.X
for i in range(self.n):
self.X[i] = self.normX(self.X[i])
self.ynormRange.append(min(self.y))
self.ynormRange.append(max(self.y))
for i in range(self.n):
self.y[i] = self.normy(self.y[i])
def addPoint(self, newX, newy, norm=True):
'''
        This adds points to the model.
:param newX: A new design vector point
:param newy: The new observed value at the point of X
:param norm: A boolean value. For adding real-world values, this should be True. If doing something in model units, this should be False
'''
if norm:
newX = self.normX(newX)
newy = self.normy(newy)
self.X = np.append(self.X, [newX], axis=0)
self.y = np.append(self.y, newy)
self.n = self.X.shape[0]
self.updateData()
while True:
try:
self.updateModel()
except:
self.train()
else:
break
def update(self, values):
'''
The function sets new hyperparameters
:param values: the new theta and p values to set for the model
'''
for i in range(self.k):
self.theta[i] = values[i]
for i in range(self.k):
self.pl[i] = values[i + self.k]
self.updateModel()
def updateModel(self):
'''
The function rebuilds the Psi matrix to reflect new data or a change in hyperparamters
'''
try:
self.updatePsi()
except Exception as err:
#pass
# print Exception, err
raise Exception("bad params")
def predict(self, X):
'''
This function returns the prediction of the model at the real world coordinates of X
:param X: Design variable to evaluate
:return: Returns the 'real world' predicted value
'''
X = copy.deepcopy(X)
X = self.normX(X)
return self.inversenormy(self.predict_normalized(X))
def predict_var(self, X):
'''
The function returns the model's predicted 'error' at this point in the model.
:param X: new design variable to evaluate, in physical world units
:return: Returns the posterior variance (model error prediction)
'''
X = copy.deepcopy(X)
X = self.normX(X)
# print X, self.predict_normalized(X), self.inversenormy(self.predict_normalized(X))
return self.predicterr_normalized(X)
def expimp(self, x):
'''
Returns the expected improvement at the design vector X in the model
:param x: A real world coordinates design vector
:return EI: The expected improvement value at the point x in the model
'''
S = self.predicterr_normalized(x)
y_min = np.min(self.y)
if S <= 0.:
EI = 0.
elif S > 0.:
EI_one = ((y_min - self.predict_normalized(x)) * (0.5 + 0.5*m.erf((
1./np.sqrt(2.))*((y_min - self.predict_normalized(x)) /
S))))
EI_two = ((S * (1. / np.sqrt(2. * np.pi))) * (np.exp(-(1./2.) *
((y_min - self.predict_normalized(x))**2. / S**2.))))
EI = EI_one + EI_two
return EI
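    # Closed form implemented above: EI(x) = (y_min - yhat(x)) * Phi(z) + s(x) * phi(z),
    # with z = (y_min - yhat(x)) / s(x); Phi and phi are the standard normal CDF and PDF.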
def weightedexpimp(self, x, w):
"""weighted expected improvement (Sobester et al. 2005)"""
S = self.predicterr_normalized(x)
y_min = np.min(self.y)
if S <= 0.:
EI = 0.
elif S > 0.:
EI_one = w*((y_min - self.predict_normalized(x)) * (0.5 +
0.5*m.erf((1./np.sqrt(2.))*((y_min -
self.predict_normalized(x)) / S))))
EI_two = ((1. - w)*(S * (1. / np.sqrt(2. * np.pi))) *
(np.exp(-(1./2.) * ((y_min -
self.predict_normalized(x))**2. / S**2.))))
EI = EI_one + EI_two
return EI
def infill_objective_mse(self,candidates, args):
'''
        Acts as the MSE objective for a series of candidates from the infill global search.
:param candidates: An array of candidate design vectors from the infill global optimizer
:param args: args from the optimizer
:return fitness: An array of evaluated MSE values for the candidate population
'''
fitness = []
for entry in candidates:
fitness.append(-1 * self.predicterr_normalized(entry))
return fitness
def infill_objective_ei(self,candidates, args):
'''
The infill objective for a series of candidates from infill global search
:param candidates: An array of candidate design vectors from the infill global optimizer
:param args: args from the optimizer
:return fitness: An array of evaluated Expected Improvement values for the candidate population
'''
fitness = []
for entry in candidates:
fitness.append(-1 * self.expimp(entry))
return fitness
def infill(self, points, method='error', addPoint=True):
'''
The function identifies where new points are needed in the model.
:param points: The number of points to add to the model. Multiple points are added via imputation.
:param method: Two choices: EI (for expected improvement) or Error (for general error reduction)
:return: An array of coordinates identified by the infill
'''
# We'll be making non-permanent modifications to self.X and self.y here, so lets make a copy just in case
initX = np.copy(self.X)
inity = np.copy(self.y)
# This array will hold the new values we add
        returnValues = np.zeros([points, self.k], dtype=float)
        # Completion sketch: the optimizer setup below (inspyred's DEA, the population
        # size and evaluation budget) is illustrative, not the library's exact recipe.
        for i in range(points):
            rand = Random()
            rand.seed(int(time()))
            ea = inspyred.ec.DEA(rand)
            ea.terminator = ec.terminators.evaluation_termination
            evaluator = self.infill_objective_ei if method == 'ei' else self.infill_objective_mse
            final_pop = ea.evolve(generator=lambda random, args: [random.uniform(0, 1) for _ in range(self.k)],
                                  evaluator=evaluator, pop_size=100, maximize=False,
                                  bounder=ec.Bounder(0, 1), max_evaluations=3000)
            newpoint = max(final_pop).candidate
            returnValues[i][:] = newpoint
            # impute the model's own prediction at the new point so successive infill points spread out
            self.addPoint(newpoint, self.predict_normalized(newpoint), norm=False)
        if not addPoint:
            # undo the temporary imputations
            self.X = initX
            self.y = inity
            self.n = self.X.shape[0]
            self.updateData()
            self.updateModel()
        return returnValues
# -*- coding: utf-8 -*-
'''
Copyright 2015 by <NAME>
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: <NAME>
This class holds example code how to use the dream algorithm
'''
import numpy as np
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup
import matplotlib.pyplot as plt
if __name__ == "__main__":
    parallel ='seq' # Runs everything in sequential mode
    np.random.seed(2000) # Makes the results reproducible
# Initialize the Hymod example
# In this case, we tell the setup which algorithm we want to use, so
# we can use this exmaple for different algorithms
spot_setup=spot_setup(_used_algorithm='sceua')
#Select number of maximum allowed repetitions
rep=500
filename = 'SCEUA_hymod'
# Create the SCE-UA sampler of spotpy, alt_objfun is set to None to force SPOTPY
# to jump into the def objectivefunction in the spot_setup class (default is
# spotpy.objectivefunctions.rmse)
sampler=spotpy.algorithms.sceua(spot_setup, dbname='SCEUA_hymod', dbformat='csv')
#Start the sampler, one can specify ngs, kstop, peps and pcento id desired
sampler.sample(rep)#,ngs=10, kstop=50, peps=0.1, pcento=0.1)
# Load the results gained with the sceua sampler, stored in SCEUA_hymod.csv
results = spotpy.analyser.load_csv_results('SCEUA_hymod')
print(len(results), 'runs were saved.')
    fig= plt.figure(1,figsize=(9,5))
    plt.plot(results['like1'])
    plt.ylabel('RMSE')
    plt.xlabel('Iteration')
    fig.savefig('hymod_objectivefunction.png',dpi=300)  # save before show(), or the saved figure may be blank
    plt.show()
# Example plot to show the parameter distribution ######
fig= plt.figure(2,figsize=(9,9))
normed_value = 1
plt.subplot(5,2,1)
x = results['parcmax']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1) #Ignores burn-in chain
plt.plot(x[index],'.')
plt.ylabel('cmax')
plt.ylim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)
plt.subplot(5,2,2)
x = x[int(len(results)*0.9):] #choose the last 10% of the sample
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('cmax')
plt.xlim(spot_setup.cmax.minbound, spot_setup.cmax.maxbound)
plt.subplot(5,2,3)
x = results['parbexp']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('bexp')
plt.ylim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)
plt.subplot(5,2,4)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('bexp')
plt.xlim(spot_setup.bexp.minbound, spot_setup.bexp.maxbound)
plt.subplot(5,2,5)
x = results['paralpha']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('alpha')
plt.ylim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)
plt.subplot(5,2,6)
x = x[int(len(results)*0.9):]
hist, bins = np.histogram(x, bins=20, density=True)
widths = np.diff(bins)
hist *= normed_value
plt.bar(bins[:-1], hist, widths)
plt.ylabel('alpha')
plt.xlim(spot_setup.alpha.minbound, spot_setup.alpha.maxbound)
plt.subplot(5,2,7)
x = results['parKs']
for i in range(int(max(results['chain'])-1)):
index=np.where(results['chain']==i+1)
plt.plot(x[index],'.')
plt.ylabel('Ks')
plt.ylim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)
plt.subplot(5,2,8)
x = x[int(len(results)*0.9):]
    hist, bins = np.histogram(x, bins=20, density=True)
    widths = np.diff(bins)
    hist *= normed_value
    plt.bar(bins[:-1], hist, widths)
    plt.ylabel('Ks')
    plt.xlim(spot_setup.Ks.minbound, spot_setup.Ks.maxbound)
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def distributed(x,L, *args):
"""Most basic distributed consensus algorithm
    It's actually gradient descent of (1/2) x^T L x"""
return -np.dot(L,x)
def distributed_random_topology(x,graph, proportion=0.5, *args):
"""Same as dynamics but randomly rewires
the graph edge connections."""
    nx.connected_double_edge_swap(graph, int(proportion*len(graph.nodes())))  # swap count must be an int
L = nx.laplacian_matrix(graph)
L = L.todense()
    return -np.dot(L,x)
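# A minimal simulation sketch for the dynamics above (graph size, step size and
# horizon are made up for this example):
if __name__ == '__main__':
    G = nx.erdos_renyi_graph(20, 0.3)
    L = np.asarray(nx.laplacian_matrix(G).todense(), dtype=float)
    x = np.random.rand(20)
    dt = 0.01
    for _ in range(2000):
        x = x + dt * distributed(x, L)  # forward-Euler step of dx/dt = -Lx
    print('consensus value ~', x.mean())  # all entries approach the initial average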
from numpy.ma import add
import pandas as pd
import numpy as np
np.seterr(divide='ignore')
import scipy.signal as signal
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.multitest as multi
from scipy.optimize import curve_fit
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from scipy.stats import percentileofscore
from scipy.stats import circstd, circmean
import copy
import itertools
from matplotlib.lines import Line2D
from random import sample
import os
from skopt.space import Space
from skopt.sampler import Lhs
def periodogram_df(df, folder = '', **kwargs):
names = list(df.test.unique())
names.sort()
for name in names:
x, y = np.array(df[df.test == name].x), np.array(df[df.test == name].y)
if folder:
save_to = os.path.join(folder, "per_" + name)
else:
save_to = ""
periodogram(x,y, save_to = save_to, name=name, **kwargs)
def periodogram(X, Y, per_type='per', sampling_f = '', logscale = False, name = '', save_to = '', prominent = False, max_per = 240):
if per_type == 'per' or per_type == 'welch':
X_u = np.unique(X)
Y_u = []
for x_u in X_u:
#y_u.append(np.mean(y[t == x]))
Y_u.append(np.median(Y[x_u == X]))
if not sampling_f:
sampling_f = 1/(X[1]-X[0])
Y = Y_u
if per_type == 'per':
# Fourier
f, Pxx_den = signal.periodogram(Y,sampling_f)
elif per_type =='welch':
# Welch
f, Pxx_den = signal.welch(Y,sampling_f)
elif per_type == 'lombscargle':
# Lomb-Scargle
min_per = 2
#max_per = 50
f = np.linspace(1/max_per, 1/min_per, 10)
Pxx_den = signal.lombscargle(X, Y, f)
else:
print("Invalid option")
return
# significance
# Refinetti et al. 2007
p_t = 0.05
N = len(Y)
T = (1 - (p_t/N)**(1/(N-1))) * sum(Pxx_den) # threshold for significance
if f[0] == 0:
per = 1/f[1:]
Pxx = Pxx_den[1:]
else:
per = 1/f
Pxx = Pxx_den
Pxx = Pxx[per <= max_per]
per = per[per <= max_per]
try:
if logscale:
plt.semilogx(per, Pxx, 'ko')
plt.semilogx(per, Pxx, 'k--', linewidth=0.5)
plt.semilogx([min(per), max(per)], [T, T], 'k--', linewidth=1)
else:
plt.plot(per, Pxx, 'ko')
plt.plot(per, Pxx, 'k--', linewidth=0.5)
plt.plot([min(per), max(per)], [T, T], 'k--', linewidth=1)
except:
print("Could not plot!")
return
peak_label = ''
if prominent:
locs, heights = signal.find_peaks(Pxx, height = T)
if any(locs):
heights = heights['peak_heights']
s = list(zip(heights, locs))
s.sort(reverse=True)
heights, locs = zip(*s)
heights = np.array(heights)
locs = np.array(locs)
peak_label = ', max peak=' + str(per[locs[0]])
else:
locs = Pxx >= T
if any(locs):
heights, locs = Pxx[locs], per[locs]
HL = list(zip(heights, locs))
HL.sort(reverse = True)
heights, locs = zip(*HL)
peak_label = ', peaks=\n'
locs = locs[:11]
for loc in locs[:-1]:
peak_label += "{:.2f}".format(loc) + ','
peak_label += "{:.2f}".format(locs[-1])
plt.xlabel('period [hours]')
plt.ylabel('PSD')
plt.title(name + peak_label)
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
def remove_lin_comp_df(df, n_components = 0, period = 24, summary_file=""):
df2 = pd.DataFrame(columns=df.columns)
if summary_file:
df_fit = pd.DataFrame(columns=['test', 'k', 'CI', 'p', 'q'])
for test in df.test.unique():
x,y = df[df['test']==test].x,df[df['test']==test].y
x,y,fit = remove_lin_comp(x,y,n_components=n_components, period=period, return_fit=True)
df_tmp = pd.DataFrame(columns=df.columns)
df_tmp['x'] = x
df_tmp['y'] = y
df_tmp['test'] = test
df2 = df2.append(df_tmp, ignore_index=True)
if summary_file:
fit['test'] = test
df_fit=df_fit.append(fit, ignore_index=True)
if summary_file:
df_fit.q = multi.multipletests(df_fit.p, method = 'fdr_bh')[1]
if summary_file.endswith("csv"):
df_fit.to_csv(summary_file, index=False)
elif summary_file.endswith("xlsx"):
df_fit.to_excel(summary_file, index=False)
return df2
def remove_lin_comp(X, Y, n_components = 0, period = 24, return_fit=False):
X = np.array(X)
Y = np.array(Y)
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = True)
model = sm.OLS(Y, X_fit)
results = model.fit()
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
CI = CIs[1]
#A = results.params[0]
k = results.params[1]
"""
X_lin = np.zeros(X_fit.shape)
X_lin[:,1] = X_fit[:,1]
Y_lin = results.predict(X_lin)
Y = Y-Y_lin
"""
#Y_fit = results.predict(X_fir)
#Y = Y - Y_fit
#Y = Y - A - k*X
    if CI[0] * CI[1] > 0: # if both CI bounds have the same sign, i.e., the CI of the linear term excludes zero
Y = Y - k*X
if return_fit:
fit = {}
fit['k'] = results.params[1]
fit['CI'] = CI
fit['p'] = results.pvalues[1]
return X,Y,fit
"""
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = False)
model = sm.OLS(Y, X_fit)
results = model.fit()
plt.plot(X, results.fittedvalues, color="black")
"""
return X, Y
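# Usage sketch: detrend measurements before rhythmicity analysis; note that the
# linear term is subtracted only when its confidence interval excludes zero:
#   X2, Y2 = remove_lin_comp(X, Y, n_components=2, period=24)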
# prepare the independent variables
def generate_independents(X, n_components = 3, period = 24, lin_comp = False, remove_lin_comp = False):
if n_components == 0:
X_fit = X
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if lin_comp and n_components:
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
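# Sketch of the resulting design matrix: for n_components=1, period=24 and
# lin_comp=False, the columns are [const, sin(2*pi*X/24), cos(2*pi*X/24)], e.g.
#   generate_independents(np.array([0., 6., 12.]), n_components=1, period=24)
# returns an array of shape (3, 3), with the constant column prepended by
# sm.add_constant.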
# prepare the independent variables for limorhyde
def generate_independents_compare(X1, X2, n_components1 = 3, period1 = 24, n_components2 = 3, period2 = 24, lin_comp = False, non_rhythmic=False, remove_lin_comp=False):
H1 = np.zeros(X1.size)
H2 = np.ones(X2.size)
X = np.concatenate((X1, X2))
H_i = np.concatenate((H1, H2))
X_i = H_i * X
for i in np.arange(n_components1):
n = i+1
A = np.sin((X/(period1/n))*np.pi*2)
B = np.cos((X/(period1/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if non_rhythmic:
X_fit = np.column_stack((X_fit, H_i))
else:
for i in np.arange(n_components2):
n = i+1
A_i = H_i * np.sin((X/(period2/n))*np.pi*2)
B_i = H_i * np.cos((X/(period2/n))*np.pi*2)
X_fit = np.column_stack((X_fit, np.column_stack((A_i, B_i))))
X_fit = np.column_stack((X_fit, H_i))
if lin_comp:
X_fit = np.column_stack((X_i, X_fit))
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit[:,1] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
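# Sketch of the resulting column layout (for n_components1 = n_components2 = 1,
# lin_comp=False, non_rhythmic=False):
#   [const, sin(2*pi*X/per1), cos(2*pi*X/per1),
#    H_i*sin(2*pi*X/per2), H_i*cos(2*pi*X/per2), H_i]
# where H_i is 0 for samples from the first group and 1 for the second.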
"""
*****************************
* start of finding the best *
*****************************
"""
def get_best_fits(df_results, criterium = 'R2_adj', reverse = False, n_components = []):
df_best = pd.DataFrame(columns = df_results.columns, dtype=float)
names = np.unique(df_results.test)
for name in names:
if n_components:
for n_comp in n_components:
if reverse:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].min()
else:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results.n_components == n_comp) & (df_results[criterium] == M)], ignore_index = True)
else:
M = df_results[df_results.test == name][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results[criterium] == M)], ignore_index = True)
return df_best
def get_best_models_population(df, df_models, n_components = [1,2,3], lin_comp = False, criterium = 'RSS', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, criterium = criterium, reverse = reverse, n_components=n_components)
for test in names:
        n_points = df[df.test.str.startswith(test)].x.shape[0] # this is where get_best_models_population differs from get_best_models
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
# compare two models according to the F-test
# http://people.reed.edu/~jones/Courses/P24.pdf
# https://www.graphpad.com/guides/prism/7/curve-fitting/index.htm?reg_howtheftestworks.htm
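# As a sketch for reference (the standard extra sum-of-squares form assumed to
# be implemented by compare_models):
#   F = ((RSS_reduced - RSS_full) / (DF_reduced - DF_full)) / (RSS_full / DF_full)
#   p = 1 - stats.f.cdf(F, DF_reduced - DF_full, DF_full)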
def get_best_models(df, df_models, n_components = [1,2,3], lin_comp = False, criterium='p', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, n_components = n_components, criterium=criterium, reverse = reverse)
for test in names:
n_points = df[df.test == test].x.shape[0]
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
"""
***************************
* end of finding the best *
***************************
"""
"""
************
* plotting *
************
"""
def plot_data(df, names = [], folder = '', prefix = '', color='black'):
if not names:
names = np.unique(df.test)
for test in names:
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
plt.plot(X,Y,'o', markersize=1, color=color)
plt.title(test)
#test = test.replace("$","")
#fig = plt.gcf()
#fig.set_size_inches(11,8)
if folder:
plt.savefig(os.path.join(folder, prefix+test+'.png'))
plt.savefig(os.path.join(folder, prefix+test+'.pdf'))
plt.close()
else:
plt.show()
def plot_data_pairs(df, names, folder = '', prefix ='', color1='black', color2='red'):
for test1, test2 in names:
X1, Y1 = np.array(df[df.test == test1].x), np.array(df[df.test == test1].y)
X2, Y2 = np.array(df[df.test == test2].x), np.array(df[df.test == test2].y)
plt.plot(X1,Y1,'o', color=color1, markersize=1, label=test1)
plt.plot(X2,Y2,'o', color=color2, markersize=1, label=test2)
plt.legend()
plt.title(test1 + ' vs. ' + test2)
if folder:
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.png'))
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.pdf'))
plt.close()
else:
plt.show()
def plot_components(X, Y, n_components = 3, period = 24, name = '', save_to = ''):
A = np.sin((X/period)*np.pi*2)
B = np.cos((X/period)*np.pi*2)
C = np.sin((X/(period/2))*np.pi*2)
D = np.cos((X/(period/2))*np.pi*2)
E = np.sin((X/(period/3))*np.pi*2)
F = np.cos((X/(period/3))*np.pi*2)
#G = np.sin((X/(period/4))*np.pi*2)
#H = np.cos((X/(period/4))*np.pi*2)
fig, axs = plt.subplots(n_components, 2, constrained_layout=True)
fig.suptitle(name, fontsize=16)
axs[0,0].plot(A, Y,'.')
axs[0,0].set(xlabel = 'sin((x/'+str(period)+') * 2$\pi$)')
axs[0,1].plot(B, Y,'.')
axs[0,1].set(xlabel = 'cos((x/'+str(period)+') * 2$\pi$)')
if n_components >= 2:
axs[1,0].plot(C, Y,'.')
axs[1,0].set(xlabel = 'sin((x/'+str(period/2)+') * 2$\pi$)')
axs[1,1].plot(D, Y,'.')
axs[1,1].set(xlabel = 'cos((x/'+str(period/2)+') * 2$\pi$)')
if n_components == 3:
axs[2,0].plot(E, Y,'.')
axs[2,0].set(xlabel = 'sin((x/'+str(period/3)+') * 2$\pi$)')
axs[2,1].plot(F, Y,'.')
axs[2,1].set(xlabel = 'cos((x/'+str(period/3)+') * 2$\pi$)')
if n_components == 4:
axs[3,0].plot(E, Y,'.')
axs[3,0].set(xlabel = 'sin((x/'+str(period/4)+') * 2$\pi$)')
axs[3,1].plot(F, Y,'.')
axs[3,1].set(xlabel = 'cos((x/'+str(period/4)+') * 2$\pi$)')
for ax in axs.flat:
ax.set(ylabel = 'y')
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
def plot_phases(acrs, amps, tests, period=24, colors = ("black", "red", "green", "blue"), folder = "", prefix="", legend=True, CI_acrs = [], CI_amps = [], linestyles = [], title = "", labels = []):#, plot_measurements = False, measurements=None):
acrs = np.array(acrs, dtype = float)
amps = np.array(amps, dtype = float)
if colors and len(colors) < len(tests):
colors += ("black",) * (len(tests)-len(colors))
x = np.arange(0, 2*np.pi, np.pi/4)
x_labels = list(map(lambda i: 'CT ' + str(i) + " ", list((x/(2*np.pi) * period).astype(int))))
x_labels[1::2] = [""]*len(x_labels[1::2])
ampM = np.max(amps)
amps /= ampM
acrs = -acrs
fig = plt.figure()
ax = fig.add_subplot(projection='polar')
ax.set_theta_offset(0.5*np.pi)
ax.set_theta_direction(-1)
lines = []
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
"""
if "LDL" in test:
color = "#FF0000"
elif "HDL" in test:
color = "#0000FF"
elif "CHOL" in test:
color = "#00FF00"
elif "control" in test.lower():
color = "#000000"
else:
color = "#0000FF"
"""
if linestyles:
#ax.plot([acr, acr], [0, amp], label=test, color=color, linestyle = linestyles[i])
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2, linestyle = linestyles[i]) )
lines.append(Line2D([0], [0], color=color, linewidth=2, linestyle=linestyles[i]))
else:
#ax.plot([acr, acr], [0, amp], label=test, color=color)
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2) )
lines.append(Line2D([0], [0], color=color, linewidth=2))
#ax.plot([acr, acr], [0, amp], label=test, color=color)
#ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, linewidth=2) )
if CI_acrs and CI_amps:
amp_l, amp_u = np.array(CI_amps[i])/ampM
amp_l = max(0, amp_l)
amp_u = min(1, amp_u)
acr_l, acr_u = -np.array(CI_acrs[i])
if acr_l - acr_u > 2*np.pi:
plt.fill_between(np.linspace(0, np.pi*2, 1000), amp_l, amp_u, color=color, alpha=0.1)
elif acr_u < acr_l:
acr_l, acr_u = acr_u, acr_l
plt.fill_between(np.linspace(acr_l, acr_u, 1000), amp_l, amp_u, color=color, alpha=0.1)
ax.set_rmax(1)
ax.set_rticks([0.5]) # Less radial ticks
ax.set_yticklabels([""])
ax.set_xticks(x)
ax.set_xticklabels(x_labels)
ax.grid(True)
ax.set_facecolor('#f0f0f0')
"""
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
if plot_measurements:
try:
x,y = measurements
except:
df = measurements
x,y=df[df.test == test].x, df[df.test == test].y
plt.plot(x,y,'o',markersize=1, alpha = 0.75, color=color)
"""
name = "_".join(tests)
#ax.set_title(name, va='bottom')
if title:
ax.set_title(title, va='bottom')
else:
ax.set_title(name, va='bottom')
if legend:
if labels:
plt.legend(lines, labels, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
else:
plt.legend(lines, tests, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
#ax.legend()
if folder:
plt.savefig(os.path.join(folder,prefix+name+"_phase.pdf"))
plt.savefig(os.path.join(folder,prefix+name+"_phase.png"))
plt.close()
else:
plt.show()
"""
*******************
* end of plotting *
*******************
"""
"""
*****************************
* start of fitting wrappers *
*****************************
"""
def fit_group(df, n_components = 2, period = 24, names = "", folder = '', prefix='', **kwargs):
    df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'R2', 'R2_adj', 'ME', 'resid_SE', 'log-likelihood', 'amplitude', 'acrophase', 'mesor', 'peaks', 'heights', 'troughs', 'heights2'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
for test in names:
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if folder:
save_to = os.path.join(folder,prefix+test+'_compnts='+str(n_comps) +'_per=' + str(per))
else:
save_to = ''
results, statistics, rhythm_param, _, _ = fit_me(X, Y, n_components = n_comps, period = per, name = test, save_to = save_to, **kwargs)
try:
R2, R2_adj = results.rsquared,results.rsquared_adj
except:
R2, R2_adj = np.nan, np.nan
df_results = df_results.append({'test': test,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'R2': R2,
'R2_adj': R2_adj,
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'log-likelihood': results.llf,
'amplitude': rhythm_param['amplitude'],
'acrophase': rhythm_param['acrophase'],
'mesor': rhythm_param['mesor'],
'peaks': rhythm_param['peaks'],
'heights': rhythm_param['heights'],
'troughs': rhythm_param['troughs'],
'heights2': rhythm_param['heights2']
}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
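# Usage sketch: fit a range of cosinor models to every series in a long-format
# DataFrame and FDR-correct the p-values across all fits (plot is forwarded to
# fit_me through **kwargs):
#   df_results = fit_group(df, n_components=[1, 2, 3], period=24, plot=False)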
def population_fit_group(df, n_components = 2, period = 24, folder = '', prefix='', names = [], **kwargs):
    df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'ME', 'resid_SE', 'amplitude', 'acrophase', 'mesor'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
names = list(set(list(map(lambda x:x.split('_rep')[0], names))))
names.sort()
    for name in names:
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
df_pop = df[df.test.str.startswith(name)]
if folder:
save_to=os.path.join(folder,prefix+name+'_compnts='+str(n_comps) +'_per=' + str(per))
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, save_to = save_to, **kwargs)
else:
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, **kwargs)
df_results = df_results.append({'test': name,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'amplitude': rhythm_params['amplitude'],
'acrophase': rhythm_params['acrophase'],
'mesor': rhythm_params['mesor']}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
"""
***************************
* end of fitting wrappers *
***************************
"""
"""
******************************
* start of fitting functions *
******************************
"""
def population_fit(df_pop, n_components = 2, period = 24, lin_comp= False, model_type = 'lin', plot = True, plot_measurements=True, plot_individuals=True, plot_margins=True, hold = False, save_to = '', x_label='', y_label='', return_individual_params = False, params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type = "LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], color="black", **kwargs):
if return_individual_params:
ind_params = {}
for param in parameters_to_analyse:
ind_params[param] = []
params = -1
tests = df_pop.test.unique()
k = len(tests)
#X_test = np.linspace(0, 2*period, 1000)
#X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_fit_eval_params[:,1] = 0
min_X = np.min(df_pop.x.values)
max_X = np.max(df_pop.x.values)
min_Y = np.min(df_pop.y.values)
max_Y = np.max(df_pop.y.values)
if plot:
if plot_measurements:
X_plot = np.linspace(min(min_X,0), 1.1*max(max_X,period), 1000)
else:
X_plot = np.linspace(0, 1.1*period, 1000)
X_plot_fits = generate_independents(X_plot, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_plot_fits[:,1] = 0
"""
min_X = 1000
max_X = 0
min_Y = 1000
max_Y = 0
min_X_test = np.min(X_test)
"""
min_Y_test = 1000
max_Y_test = 0
for test in tests:
x,y = np.array(df_pop[df_pop.test == test].x), np.array(df_pop[df_pop.test == test].y)
"""
min_X = min(min_X, np.min(x))
max_X = max(max_X, np.max(x))
min_Y = min(min_Y, np.min(y))
max_Y = max(max_Y, np.max(y))
"""
results, statistics, rhythm_params, X_test, Y_test, model = fit_me(x, y, n_components = n_components, period = period, plot = False, return_model = True, lin_comp=lin_comp, **kwargs)
X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp, remove_lin_comp=True)
if lin_comp:
X_fit_eval_params[:,1] = 0
if return_individual_params:
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_ind_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
for param in parameters_to_analyse:
ind_params[param].append(rhythm_ind_params[param])
        if (plot and plot_individuals):
            #Y_eval_params = results.predict(X_fit_eval_params)
            Y_plot_fits = results.predict(X_plot_fits)
            if not hold:
                plt.plot(X_plot, Y_plot_fits, color=color, alpha=0.25, label=test)
            else:
                plt.plot(X_plot, Y_plot_fits, color=color, alpha=0.25)
            min_Y_test = min(min_Y_test, np.min(Y_plot_fits))
            max_Y_test = max(max_Y_test, np.max(Y_plot_fits))
if plot and plot_measurements:
plt.plot(x,y,'o', color=color, markersize=1)
if type(params) == int:
params = results.params
if plot and plot_margins:
#_, lowers, uppers = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
Y_plot_fits_all = Y_plot_fits
else:
params = np.vstack([params, results.params])
if plot and plot_margins:
#_, l, u = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
#lowers = np.vstack([lowers, l])
#uppers = np.vstack([uppers, u])
Y_plot_fits_all = np.vstack([Y_plot_fits_all, Y_plot_fits])
    # parameter statistics: means, variances, standard deviations, confidence intervals, p-values
#http://reliawiki.com/index.php/Multiple_Linear_Regression_Analysis
if k > 1:
means = np.mean(params, axis=0)
        variances = np.sum((params-np.mean(params, axis=0))**2, axis = 0)/(k-1) # equivalent to np.var(params, axis=0, ddof=1)
sd = variances**0.5
se = sd/((k-1)**0.5)
T0 = means/se
p_values = 2 * (1 - stats.t.cdf(abs(T0), k-1))
t = abs(stats.t.ppf(0.05/2,df=k-1))
lower_CI = means - ((t*sd)/((k-1)**0.5))
upper_CI = means + ((t*sd)/((k-1)**0.5))
results.initialize(model, means)
else:
means = params
sd = np.zeros(len(params))
sd[:] = np.nan
se = np.zeros(len(params))
se[:] = np.nan
lower_CI = means
upper_CI = means
p_values = np.zeros(len(params))
p_values[:] = np.nan
x,y = df_pop.x, df_pop.y
xy = list(zip(x,y))
xy.sort()
x,y = zip(*xy)
x,y = np.array(x), np.array(y)
X_fit = generate_independents(x, n_components = n_components, period = period, lin_comp = lin_comp)
Y_fit = results.predict(X_fit)
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
if plot:
pop_name = "_".join(test.split("_")[:-1])
Y_plot_fits = results.predict(X_plot_fits)
if not hold:
plt.plot(X_plot,Y_plot_fits, color=color, label="population fit")
else:
plt.plot(X_plot,Y_plot_fits, color=color, label=pop_name)
plt.legend()
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('time [h]')
if y_label:
plt.ylabel(y_label)
else:
plt.ylabel('measurements')
min_Y_test = min(min_Y_test, np.min(Y_eval_params))
max_Y_test = max(max_Y_test, np.max(Y_eval_params))
if plot and plot_margins:
if k == 1:
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
else:
#lower = np.mean(lowers, axis=0)
#upper = np.mean(uppers, axis=0)
var_Y = np.var(Y_plot_fits_all, axis=0, ddof = k-1)
sd_Y = var_Y**0.5
lower = Y_plot_fits - ((t*sd_Y)/((k-1)**0.5))
upper = Y_plot_fits + ((t*sd_Y)/((k-1)**0.5))
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
if plot:
if plot_measurements:
if model_type == 'lin':
plt.axis([min(min_X,0), 1.1*max(max_X,period), 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([min(min_X,0), max_X, 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([0, period, min_Y_test*0.9, max_Y_test*1.1])
if plot:
#pop_name = "_".join(test.split("_")[:-1])
if not hold:
plt.title(pop_name + ', p-value=' + "{0:.5f}".format(statistics['p']))
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
statistics = calculate_statistics(x, y, Y_fit, n_components, period, lin_comp)
statistics_params = {'values': means,
'SE': se,
'CI': (lower_CI, upper_CI),
'p-values': p_values}
if params_CI:
population_eval_params_CI(X_test, X_fit_eval_params, results, statistics_params, rhythm_params, samples_per_param=samples_per_param_CI, max_samples = max_samples_CI, k=k, sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_individual_params:
return params, statistics, statistics_params, rhythm_params, results, ind_params
else:
return params, statistics, statistics_params, rhythm_params, results
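# Usage sketch: a population is encoded as replicate series that share a name
# prefix (e.g. 'groupA_rep1', 'groupA_rep2', ...); df_pop holds only those rows:
#   params, statistics, statistics_params, rhythm_params, results = population_fit(df_pop, n_components=2, period=24, plot=False)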
def fit_me(X, Y, n_components = 2, period = 24, lin_comp = False, model_type = 'lin', alpha = 0, name = '', save_to = '', plot=True, plot_residuals=False, plot_measurements=True, plot_margins=True, return_model = False, color = False, plot_phase = True, hold=False, x_label = "", y_label = "", rescale_to_period=False, bootstrap=False, bootstrap_size=1000, bootstrap_type="std", params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type="LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase']):
#print(lin_comp)
"""
###
# prepare the independent variables
###
"""
"""
if n_components == 0:
X_fit = X
X_fit_test = X_test
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
A_test = np.sin((X_test/(period/n))*np.pi*2)
B_test = np.cos((X_test/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
X_fit_test = np.column_stack((A_test, B_test))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
X_fit_test = np.column_stack((X_fit_test, np.column_stack((A_test, B_test))))
"""
X_fit = generate_independents(X, n_components=n_components, period=period, lin_comp=lin_comp)
#X_fit_eval_params = X_fit_test
#if lin_comp and n_components:
# X_fit = np.column_stack((X, X_fit))
# X_fit_eval_params = np.column_stack((np.zeros(len(X_test)), X_fit_test))
# X_fit_test = np.column_stack((X_test, X_fit_test))
#X_fit = sm.add_constant(X_fit, has_constant='add')
#X_fit_test = sm.add_constant(X_fit_test, has_constant='add')
#X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')
"""
###
# fit
###
"""
if model_type == 'lin':
model = sm.OLS(Y, X_fit)
results = model.fit()
elif model_type == 'poisson':
#model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
model = statsmodels.discrete.discrete_model.Poisson(Y, X_fit)
results = model.fit(disp=0)
elif model_type =='gen_poisson':
#model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)
model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit, p=1)
results = model.fit(disp=0)
elif model_type == 'nb':
# https://towardsdatascience.com/negative-binomial-regression-f99031bb25b4
# https://dius.com.au/2017/08/03/using-statsmodels-glms-to-model-beverage-consumption/#cameron
# if not alpha:
# train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
# train_results = train_model.fit()
# df_train = pd.DataFrame()
# df_train['Y'] = Y
# df_train['mu'] = train_results.mu
# df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)
# ols_expr = """AUX_OLS_DEP ~ mu - 1"""
# aux_olsr_results = smf.ols(ols_expr, df_train).fit()
# alpha=aux_olsr_results.params[0]
#model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))
model = statsmodels.discrete.discrete_model.NegativeBinomialP(Y, X_fit, p=1)
results = model.fit(disp=0)
else:
print("Invalid option")
return
if model_type =='lin':
Y_fit = results.fittedvalues
else:
Y_fit = results.predict(X_fit)
if model_type in ['lin', 'poisson', 'nb']:
statistics = calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp)
if model_type in ['poisson', 'nb']:
statistics['count'] = np.sum(Y)
else:
RSS = sum((Y - Y_fit)**2)
p = results.llr_pvalue
statistics = {'p':p, 'RSS':RSS, 'count': np.sum(Y)}
#Y_test = results.predict(X_fit_test)
X_test = np.linspace(0, 2*period, 1000)
X_fit_test = generate_independents(X_test, n_components=n_components, period=period, lin_comp=lin_comp, remove_lin_comp = True)
Y_fit_test = results.predict(X_fit_test)
rhythm_params = evaluate_rhythm_params(X_test, Y_fit_test, period=period)
if lin_comp:
rhythm_params['lin_comp'] = results.params[1]
CIs = results.conf_int()
if type(CIs) != np.ndarray:
rhythm_params['CI(lin_comp)'] = CIs.values[1]
else:
rhythm_params['CI(lin_comp)'] = CIs[1]
rhythm_params['p(lin_comp)'] = results.pvalues[1]
#print(rhythm_params['p(lin_comp)'])
"""
###
# plot
###
"""
if plot:
if plot_measurements:
min_X = np.min(X)
max_X = np.max(X)
else:
min_X = 0
max_X = period
X_plot = np.linspace(min_X, max_X, 1000)
X_plot_fits = generate_independents(X_plot, n_components=n_components, period=period, lin_comp=lin_comp)
Y_plot = results.predict(X_plot_fits)
###
if not color:
color = 'black'
if plot_measurements:
if not hold:
plt.plot(X,Y, 'ko', markersize=1, label = 'data', color=color)
else:
plt.plot(X,Y, 'ko', markersize=1, color=color)
if not hold:
plt.plot(X_plot, Y_plot, 'k', label = 'fit', color=color)
else:
plt.plot(X_plot, Y_plot, 'k', label = name, color=color)
# plot measurements
if plot_measurements:
if rescale_to_period:
X = X % period
            plt.axis([min_X, max_X, 0.9*min(min(Y), min(Y_plot)), 1.1*max(max(Y), max(Y_plot))])
else:
plt.axis([min_X, max_X, min(Y_plot)*0.9, max(Y_plot)*1.1])
if name:
plt.title(name)
"""
if model_type == 'lin':
if name:
plt.title(name + ', p-value=' + "{0:.5f}".format(statistics['p']))
else:
plt.title('p-value=' + "{0:.5f}".format(statistics['p']))
else:
if name:
plt.title(name + ', p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
else:
plt.title('p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
"""
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('Time [h]')
if y_label:
plt.ylabel(y_label)
elif model_type == 'lin':
plt.ylabel('Measurements')
else:
plt.ylabel('Count')
# plot confidence intervals
if plot_margins:
if model_type == 'lin':
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
if color:
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
else:
plt.fill_between(X_plot, lower, upper, color='#888888', alpha=0.1)
else:
                # sample parameter combinations from the 95% confidence intervals of the estimated parameters and draw the corresponding fits
res2 = copy.deepcopy(results)
params = res2.params
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
#N = 512
N = 1024
if n_components == 1:
N2 = 10
elif n_components == 2:
N2 = 8
else:
N2 = 10 - n_components
P = np.zeros((len(params), N2))
for i, CI in enumerate(CIs):
P[i,:] = np.linspace(CI[0], CI[1], N2)
n_param_samples = P.shape[1]**P.shape[0]
                N = min(max_samples_CI, n_param_samples)
if n_param_samples < 10**6:
params_samples = np.random.choice(n_param_samples, size=N, replace=False)
else:
params_samples = my_random_choice(max_val=n_param_samples, size=N)
for i,idx in enumerate(params_samples):
p = lazy_prod(idx, P)
res2.initialize(results.model, p)
Y_test_CI = res2.predict(X_plot_fits)
if plot and plot_margins:
if color and color != '#000000':
plt.plot(X_plot, Y_test_CI, color=color, alpha=0.05)
else:
plt.plot(X_plot, Y_test_CI, color='#888888', alpha=0.05)
if not hold:
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
if plot_residuals:
resid = results.resid
sm.qqplot(resid)
plt.title(name)
if save_to:
plt.savefig(save_to+'_resid.pdf', bbox_inches='tight')
plt.savefig(save_to+'_resid.png')
plt.close()
else:
plt.show()
if plot_phase:
per = rhythm_params['period']
amp = rhythm_params['amplitude']
phase = rhythm_params['acrophase']
if save_to:
folder = os.path.join(*os.path.split(save_to)[:-1])
plot_phases([phase], [amp], [name], period=per, folder=folder)
else:
plot_phases([phase], [amp], [name], period=per)#, plot_measurements=True, measurements=[X,Y])
if bootstrap:
eval_params_bootstrap(X, X_fit, X_test, X_fit_test, Y, model_type = model_type, rhythm_params=rhythm_params, bootstrap_size=bootstrap_size, bootstrap_type=bootstrap_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if params_CI:
eval_params_CI(X_test, X_fit_test, results, rhythm_params, samples_per_param = samples_per_param_CI, max_samples = max_samples_CI, k=len(X), sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_model:
return results, statistics, rhythm_params, X_test, Y_fit_test, model
else:
return results, statistics, rhythm_params, X_test, Y_fit_test
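# Usage sketch (synthetic data, for illustration only):
#   X = np.linspace(0, 48, 100)
#   Y = 3 + 2*np.cos(2*np.pi*X/24) + np.random.normal(0, 0.5, 100)
#   results, statistics, rhythm_params, X_test, Y_test = fit_me(X, Y, n_components=1, period=24, plot=False)
#   # rhythm_params['amplitude'] ~ 2, rhythm_params['mesor'] ~ 3, statistics['p'] ~ 0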
"""
****************************
* end of fitting functions *
****************************
"""
"""
***********************
* start of assessment *
***********************
"""
# rhythm params
def evaluate_rhythm_params(X,Y, project_acrophase=True, period=0):
#plt.plot(X,Y)
#plt.show()
m = min(Y)
M = max(Y)
A = M - m
MESOR = m + A/2
AMPLITUDE = abs(A/2)
PHASE = 0
PHASE_LOC = 0
H = M - 0.01*M if M >= 0 else M + 0.01*M
locs, heights = signal.find_peaks(Y, height = H)
heights = heights['peak_heights']
if len(locs) >= 2:
period2 = X[locs[1]] - X[locs[0]]
period2 = int(round(period2))
else:
period2 = np.nan
if not period:
period = period2
if len(locs) >= 1:
PHASE = X[locs[0]]
PHASE_LOC = locs[0]
if period:
ACROPHASE = phase_to_radians(PHASE, period)
if project_acrophase:
ACROPHASE = project_acr(ACROPHASE)
else:
ACROPHASE = np.nan
# peaks and heights
#Y = Y[X < 24]
#X = X[X < 24]
locs, heights = signal.find_peaks(Y, height = MESOR)
heights = heights['peak_heights']
peaks = X[locs]
heights = Y[locs]
idxs1 = peaks <= period
peaks = peaks[idxs1]
heights = heights[idxs1]
Y2 = M - Y
locs2, heights2 = signal.find_peaks(Y2, height = MESOR-m)
heights2 = heights2['peak_heights']
troughs = X[locs2]
heights2 = Y[locs2]
idxs2 = troughs <= period
troughs = troughs[idxs2]
heights2 = heights2[idxs2]
# rhythm_params
return {'period':period, 'amplitude':AMPLITUDE, 'acrophase':ACROPHASE, 'mesor':MESOR, 'peaks': peaks, 'heights': heights, 'troughs': troughs, 'heights2': heights2, 'max_loc': PHASE_LOC, 'period2':period2}
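# Note (a sketch of the conventions used above): for Y = MESOR + A*cos(2*pi*(X - t_peak)/period),
# the function recovers amplitude ~ A and mesor ~ MESOR; the acrophase is the
# location of the first peak converted to radians via phase_to_radians (and
# projected with project_acr when project_acrophase is set).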
def calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp = False):
# statistics according to Cornelissen (eqs (8) - (9))
MSS = sum((Y_fit - Y.mean())**2)
RSS = sum((Y - Y_fit)**2)
n_params = n_components * 2 + 1
if lin_comp:
n_params += 1
N = Y.size
F = (MSS/(n_params - 1)) / (RSS/(N - n_params))
p = 1 - stats.f.cdf(F, n_params - 1, N - n_params)
#print("p-value(Cornelissen): {}".format(p))
# statistics of GOF according to Cornelissen (eqs (14) - (15))
    # TODO: should this be corrected for lumicycle data, i.e., when there are multiple consecutive measurements at a single time point?
#X_periodic = (X % period).astype(int)
X_periodic = np.round_(X % period,2)
X_unique = np.unique(X_periodic)
n_T = len(X_unique)
SSPE = 0
for x in X_unique:
Y_i_avg = np.mean(Y[X_periodic == x])
SSPE += sum((Y[X_periodic == x] - Y_i_avg)**2)
SSLOF = RSS-SSPE
#print('RSS: ', RSS)
#print('SSPE: ', SSPE)
#print('SSLOF: ', SSLOF)
if lin_comp:
try:
F = (SSLOF/(n_T-1-(2*n_components + 1)))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-(2*n_components + 1), N-n_T)
except:
F = np.nan
p_reject = np.nan
else:
try:
F = (SSLOF/(n_T-1-2*n_components))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-2*n_components, N-n_T)
except:
F = np.nan
p_reject = np.nan
    # Another measure that describes the goodness of fit:
    # how well does the curve describe the data?
    # signal-to-noise ratio, where the fitted curve acts as the signal and the
    # variability of the data as the noise
stdev_data = np.std(Y, ddof = 1)
stdev_fit = np.std(Y_fit, ddof = 1)
SNR = stdev_fit / stdev_data
# Standard Error of residuals, margin of error
# https://stats.stackexchange.com/questions/57746/what-is-residual-standard-error
DoF = N - n_params
resid_SE = np.sqrt(RSS/DoF)
# https://scientificallysound.org/2017/05/16/independent-t-test-python/
# https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/hypothesis-testing/margin-of-error/
critical_value = stats.t.ppf(1-0.025, DoF)
ME = critical_value * resid_SE
return {'p':p, 'p_reject':p_reject, 'SNR':SNR, 'RSS': RSS, 'resid_SE': resid_SE, 'ME': ME}
"""
*********************
* end of assessment *
*********************
"""
"""
*****************************
* start of compare wrappers *
*****************************
"""
# compare pairs using a given number of components and period
# analysis - options (from best to worst) (ADDITIONAL ANALYSIS)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_limo(df, pairs, n_components = 3, period = 24, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
if folder:
save_to = os.path.join(folder,prefix + test1 + '-' + test2 + '_per=' + str(per) + '_comps=' + str(n_comps))
else:
save_to = ''
#pvalues, params, results = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, lin_comp = lin_comp, model_type = model_type, alpha=alpha, save_to = save_to, plot_measurements=plot_measurements)
#p_overall, pvalues, params, _ = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, **kwargs)
p_overall, p_params, p_F, _, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using the best models as stored in df_best_models
# Basic analysis: first analysis according to LimoRhyde (Singer:2019). Extended with the extra sum-of-squares F test that compares two nested models.
# compare pairs with the presumption that the same model is used in both cases
# the same model: the same period and the same number of cosinor components
#
# analysis - options (from best to worst)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_best_models_limo(df, df_best_models, pairs, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
        # if the models have a different number of components, always start with the simpler model
        # a model is simpler if its number of components is smaller
if n_components1 > n_components2:
test1, test2 = test2, test1
n_components1, n_components2 = n_components2, n_components1
period1, period2 = period2, period1
if folder:
            save_to = os.path.join(folder, prefix + test1 + '-' + test2 + '_per1=' + str(period1) + '_comps1=' + str(n_components1) + '_per2=' + str(period2) + '_comps2=' + str(n_components2))
else:
save_to = ''
p_overall, p_params, p_F, params, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
        #d['CI(d_amplitude)'] = rhythm_params['CI(d_amplitude)']
        #d['p(d_amplitude)'] = rhythm_params['p(d_amplitude)']
        #d['CI(d_acrophase)'] = rhythm_params['CI(d_acrophase)']
        #d['p(d_acrophase)'] = rhythm_params['p(d_acrophase)']
        df_results = df_results.append(d, ignore_index=True)
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using a given number of components and period
# analysis - options (from best to worst)
# - bootstrap: independent bootstrap analysis
# - CI: independent analysis of confidence intervals of two models
# To speed things up, pass df_results_extended, which stores the confidence intervals of amplitude and acrophase for all analysed models (the result of cosinor.analyse_models).
def diff_p_t_test_from_CI(X1, X2, CI1, CI2, DoF, angular = False):
dX = X2 - X1
if angular:
dX = project_acr(dX)
t = abs(stats.t.ppf(0.05/2,df=DoF))
dev1 = (CI1[1] - CI1[0])/2
dev2 = (CI2[1] - CI2[0])/2
if angular:
dev1 = abs(project_acr(dev1))
dev2 = abs(project_acr(dev2))
else:
dev1 = abs(dev1)
dev2 = abs(dev2)
dev = dev1+dev2
se = (dev1 + dev2)/t
CI = [dX-dev, dX+dev]
T0 = dX/se
p_val = 2 * (1 - stats.t.cdf(abs(T0), DoF))
return dX, p_val, CI
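# Usage sketch (hypothetical numbers): test the difference between two amplitudes
# given their 95% confidence intervals and the residual degrees of freedom:
#   dA, p_val, CI = diff_p_t_test_from_CI(1.0, 1.5, (0.8, 1.2), (1.2, 1.8), DoF=20)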
def compare_pairs(df, pairs, n_components = 3, period = 24, analysis = "bootstrap", df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp = False, **kwargs):
if (analysis != "CI") and (analysis != "bootstrap"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
if analysis == "CI":
rhythm_params = compare_pair_CI(df, test1, test2, n_components = n_comps, period = per, single_params=single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
elif analysis == "bootstrap":
rhythm_params = compare_pair_bootstrap(df, test1, test2, n_components = n_comps, period = per, single_params=single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
d['p1'] = rhythm_params['statistics1']['p']
d['p2'] = rhythm_params['statistics2']['p']
if lin_comp:
rp1 = rhythm_params['rhythm_params1']
rp2 = rhythm_params['rhythm_params2']
d['d_lin_comp'], d['p(d_lin_comp)'], d['CI(d_lin_comp)'] = diff_p_t_test_from_CI(rp1['lin_comp'], rp2['lin_comp'], rp1['CI(lin_comp)'], rp2['CI(lin_comp)'], rhythm_params['DoF'])
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
if lin_comp:
param = "lin_comp"
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
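# Usage sketch ('wt' and 'ko' are hypothetical test names present in df):
#   df_cmp = compare_pairs(df, [('wt', 'ko')], n_components=2, period=24, analysis="bootstrap")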
# compare pairs using the best models as stored in df_best_models
# each member of a pair uses its own model
# analysis - options (from best to worst)
# - bootstrap: independent bootstrap analysis
# - CI: independent analysis of confidence intervals of two models
# To speed things up, pass df_results_extended, which stores the confidence intervals of amplitude and acrophase for all analysed models (the result of cosinor.analyse_best_models).
def compare_pairs_best_models(df, df_best_models, pairs, analysis = "bootstrap", df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp=False, **kwargs):
if (analysis != "CI") and (analysis != "bootstrap"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_components1) & (df_results_extended['period'] == period1)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_components2) & (df_results_extended['period'] == period2)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
if analysis == "CI":
rhythm_params = compare_pair_CI(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
elif analysis == "bootstrap":
rhythm_params = compare_pair_bootstrap(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
d['p1'] = rhythm_params['statistics1']['p']
d['p2'] = rhythm_params['statistics2']['p']
if lin_comp:
rp1 = rhythm_params['rhythm_params1']
rp2 = rhythm_params['rhythm_params2']
d['d_lin_comp'], d['p(d_lin_comp)'], d['CI(d_lin_comp)'] = diff_p_t_test_from_CI(rp1['lin_comp'], rp2['lin_comp'], rp1['CI(lin_comp)'], rp2['CI(lin_comp)'], rhythm_params['DoF'])
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
if lin_comp:
param = "lin_comp"
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using a given number of components and period
# analysis - options (from best to worst)
# - CI: independent analysis of confidence intervals of two models
# - permutation: permutation/randomisation test
# To speed things up, pass df_results_extended, which stores the confidence intervals of amplitude and acrophase for all analysed models (the result of cosinor.analyse_models_population).
def compare_pairs_population(df, pairs, n_components = 3, period = 24, folder = "", prefix = "", analysis = "CI", lin_comp= False, model_type = 'lin', df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if (analysis != "CI") and (analysis != "permutation"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
if analysis == "CI":
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
else:
columns += [f'p(d_{param})', f'q(d_{param})'] # permutation test does not assess the confidence intervals
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
df_pop1 = df[df.test.str.startswith(test1)]
df_pop2 = df[df.test.str.startswith(test2)]
_, statistics1, _, rhythm_params1, _ = population_fit(df_pop1, n_components = n_comps, period = per, plot = False, lin_comp = lin_comp, model_type = model_type)
_, statistics2, _, rhythm_params2, _ = population_fit(df_pop2, n_components = n_comps, period = per, plot = False, lin_comp = lin_comp, model_type = model_type)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
d['d_amplitude'] = rhythm_params2['amplitude'] - rhythm_params1['amplitude']
d['d_acrophase'] = project_acr(rhythm_params2['acrophase'] - rhythm_params1['acrophase'])
d['p1'] = statistics1['p']
d['p2'] = statistics2['p']
if analysis == "CI":
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
rhythm_params = compare_pair_population_CI(df, test1, test2, n_components=n_comps, period=per, lin_comp = lin_comp, model_type = model_type, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
elif analysis == "permutation":
rhythm_params = permutation_test_population_approx(df, [(test1,test2)], n_components=n_comps, period=per, plot=False, lin_comp = lin_comp, model_type = model_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
#d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using the best models as stored in best_models
# analysis - options (from best to worst)
# - CI: independent analysis of confidence intervals of two models
# - permutation: permutation/randomisation test
# To speed things up, pass df_results_extended, which stores the confidence intervals of amplitude and acrophase for all analysed models (the result of cosinor.analyse_best_models_population).
def compare_pairs_best_models_population(df, df_best_models, pairs, folder = "", prefix = "", analysis = "CI", df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if (analysis != "CI") and (analysis != "permutation"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
if analysis == "CI":
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
else:
columns += [f'p(d_{param})', f'q(d_{param})'] # permutation test does not assess the confidence intervals
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
p1 = model1.p
p2 = model2.p
q1 = model1.q
q2 = model2.q
d_amplitude = model2.amplitude - model1.amplitude
d_acrophase = project_acr(model2.acrophase - model1.acrophase)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
d['d_amplitude'] = d_amplitude
d['d_acrophase'] = d_acrophase
d['p1'] = p1
d['p2'] = p2
d['q1'] = q1
d['q2'] = q2
if analysis == "CI":
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_components1) & (df_results_extended['period'] == period1)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_components2) & (df_results_extended['period'] == period2)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
rhythm_params = compare_pair_population_CI(df, test1, test2, n_components=n_components1, period=period1, n_components2=n_components2, period2=period2, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
elif analysis == "permutation":
rhythm_params = permutation_test_population_approx(df, [(test1,test2)], n_components=n_components1, period=period1, n_components2=n_components2, period2=period2, plot=False, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
#d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = pd.concat([df_results, pd.DataFrame([d])], ignore_index=True)
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
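# Example usage of the compare wrappers above (a minimal sketch; assumes df is a
# long-format dataframe with 'test', 'x' and 'y' columns, that replicates are named
# like 'wt_rep1', 'wt_rep2', ..., and that df_best_models was produced by one of the
# model-selection helpers of this module):
#
#   pairs = [('wt', 'ko')]
#   df_cmp = compare_pairs_best_models_population(df, df_best_models, pairs,
#                                                 analysis="CI")
#   print(df_cmp[['test', 'd_amplitude', 'p(d_amplitude)', 'q(d_amplitude)']])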
"""
***************************
* end of compare wrappers *
***************************
"""
#def compare_pair_df_extended(df, test1, test2, n_components = 3, period = 24, n_components2 = None, period2 = None, lin_comp = False, model_type = 'lin', alpha = 0, save_to = '', non_rhythmic = False, plot=True, plot_measurements=True, plot_residuals=False, plot_margins=True, x_label = '', y_label = '', bootstrap = False, bootstrap_independent = False, bootstrap_type="std", bootstrap_size=1000, params_CI = False, params_CI_independent = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type="LHS"):
# additional analysis - options (from best to worst)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pair_df_extended(df, test1, test2, n_components = 3, period = 24, n_components2 = None, period2 = None, lin_comp = False, model_type = 'lin', alpha = 0, save_to = '', non_rhythmic = False, plot=True, plot_measurements=True, plot_residuals=False, plot_margins=True, x_label = '', y_label = '', additional_analysis = "", bootstrap_type="std", bootstrap_size=1000, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type="LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase']):
n_components1 = n_components
period1 = period
if not n_components2:
n_components2 = n_components1
if not period2:
period2 = period1
df_pair = df[(df.test == test1) | (df.test == test2)].copy()
df_pair['h_i'] = 0
df_pair.loc[df_pair.test == test2, 'h_i'] = 1
X = df_pair.x
Y = df_pair.y
H_i = df_pair.h_i
"""
###
# prepare the independent variables
###
"""
X_i = H_i * X
for i in np.arange(n_components1):
n = i+1
A = np.sin((X/(period1/n))*np.pi*2)
B = np.cos((X/(period1/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if non_rhythmic:
X_fit = np.column_stack((X_fit, H_i))
idx_params = np.array([-1])
else:
for i in np.arange(n_components2):
n = i+1
A_i = H_i * np.sin((X/(period2/n))*np.pi*2)
B_i = H_i * np.cos((X/(period2/n))*np.pi*2)
X_fit = np.column_stack((X_fit, np.column_stack((A_i, B_i))))
X_fit = np.column_stack((X_fit, H_i))
# idx_params = [3, 4] # n = 1
# idx_params = [5, 6, 7, 8] # n = 2
# idx_params = [7, 8, 9, 10, 11, 12] # n = 3
# idx_params = [9, 10, 11, 12, 13, 14, 15, 16] # n = 4
#strt = 2*n_components + 1
#stp = strt + 2*n_components + 1
strt = -2
stp = strt - 2*n_components2 - 1
idx_params = np.arange(strt, stp, -1)
if lin_comp:
X_fit = np.column_stack((X_i, X_fit))
X_fit = np.column_stack((X, X_fit))
idx_params = np.array(idx_params) + 2
X_fit = sm.add_constant(X_fit, has_constant='add')
"""
###
# fit
###
"""
if model_type == 'lin':
model = sm.OLS(Y, X_fit)
results = model.fit()
elif model_type == 'poisson':
#model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
model = statsmodels.discrete.discrete_model.Poisson(Y, X_fit)
results = model.fit(disp=0)
elif model_type =='gen_poisson':
#model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)
model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit, p=1)
results = model.fit(disp=0)
elif model_type == 'nb':
# if not alpha:
# train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
# train_results = train_model.fit()
# df_train = pd.DataFrame()
# df_train['Y'] = Y
# df_train['mu'] = train_results.mu
# df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)
# ols_expr = """AUX_OLS_DEP ~ mu - 1"""
# aux_olsr_results = smf.ols(ols_expr, df_train).fit()
# alpha=aux_olsr_results.params[0]
# model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))
model = statsmodels.discrete.discrete_model.NegativeBinomialP(Y, X_fit, p=1)
results = model.fit(disp=0)
else:
print("Invalid option")
return
"""
###
# plot
###
"""
###
if plot and plot_measurements:
plt.plot(df_pair[df_pair.test == test1].x, df_pair[df_pair.test == test1].y, 'ko', markersize=1, alpha = 0.75)
plt.plot(df_pair[df_pair.test == test2].x, df_pair[df_pair.test == test2].y, 'ro', markersize=1, alpha = 0.75)
#plt.plot(X, results.fittedvalues, label = 'fit')
if model_type =='lin':
Y_fit = results.fittedvalues
p_overall = results.f_pvalue
else:
Y_fit = results.predict(X_fit)
p_overall = results.llr_pvalue
X1 = X[H_i == 0]
#Y_fit1 = Y_fit[H_i == 0]
#L1 = list(zip(X1,Y_fit1))
#L1.sort()
#X1,Y_fit1 = list(zip(*L1))
X2 = X[H_i == 1]
#Y_fit2 = Y_fit[H_i == 1]
#L2 = list(zip(X2,Y_fit2))
#L2.sort()
#X2,Y_fit2 = list(zip(*L2))
#plt.plot(X1, Y_fit1, 'k', label = 'fit '+test1)
#plt.plot(X2, Y_fit2, 'r', label = 'fit '+test2)
### F-test
# for nested models
# using extra-sum-of-squares F test
# in a similar way as described in CYCLOPS
# https://www.pnas.org/content/114/20/5312#sec-8
# https://www.pnas.org/content/pnas/suppl/2017/04/20/1619320114.DCSupplemental/pnas.201619320SI.pdf?targetid=nameddest%3DSTXT
n_params_full = len(results.params)
n_params_small = n_params_full - len(idx_params)
N = len(Y)
r_small = fit_me(X, Y, n_components, period, lin_comp=lin_comp, model_type=model_type, alpha=alpha, plot=False, x_label = x_label, y_label = y_label)
RSS_small = r_small[1]['RSS']
RSS_full = sum((Y - Y_fit)**2)
DoF_small = N-n_params_small
DoF_full = N-n_params_full
"""
print('RSS_small: ', RSS_small)
print('RSS_full: ', RSS_full)
print('n_small, dof: ', n_params_small, DoF_small)
print('n_full, dof: ', n_params_full, DoF_full)
"""
p_f = compare_models(RSS_small, RSS_full, DoF_small, DoF_full)
if plot:
### plot with higher density
n_points = 1000
max_P = max(period1, period2)
X_full = np.linspace(min(min(X1),min(X2)), max(max_P, max(max(X1), max(X2))), n_points)
X_fit_full = generate_independents_compare(X_full, X_full, n_components1 = n_components1, period1 = period1, n_components2 = n_components2, period2 = period2, lin_comp= lin_comp)
H_i = X_fit_full[:,-1]
locs = H_i == 0
#Y_fit_full = results.predict(X_fit_full)
#plt.plot(X_full, Y_fit_full[0:n_points], 'k', label = test1)
#plt.plot(X_full, Y_fit_full[n_points:], 'r', label = test2)
Y_fit_full1 = results.predict(X_fit_full[locs])
Y_fit_full2 = results.predict(X_fit_full[~locs])
plt.plot(X_full, Y_fit_full1, 'k', label = test1)
plt.plot(X_full, Y_fit_full2, 'r', label = test2)
if model_type == 'lin' and plot_margins:
_, lower, upper = wls_prediction_std(results, exog=X_fit_full[locs], alpha=0.05)
plt.fill_between(X_full, lower, upper, color='black', alpha=0.1)
_, lower, upper = wls_prediction_std(results, exog=X_fit_full[~locs], alpha=0.05)
plt.fill_between(X_full, lower, upper, color='red', alpha=0.1)
### end of plot with higher density
#p = min(results.pvalues[idx_params])
#plt.title(test1 + ' vs. ' + test2 + ', p-value=' + "{0:.5f}".format(p))
plt.title(test1 + ' vs. ' + test2 + ', p-value=' + "{0:.5f}".format(p_f))
plt.xlabel('time [h]')
plt.ylabel('measurements')
plt.legend()
#fig = plt.gcf()
#fig.set_size_inches(11,8)
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
if plot_residuals:
resid = results.resid
sm.qqplot(resid)
plt.title(test1 + ' vs. ' + test2)
if save_to:
root, ext = os.path.splitext(save_to)
save_to_resid = root + '_resid' + (ext or '.png')
plt.savefig(save_to_resid)
plt.close()
else:
plt.show()
#p_values = list(results.pvalues[idx_params]) + [p_f]
pvalues = (results.pvalues)
if type(pvalues) != np.ndarray:
pvalues = pvalues.values
p_params = np.nanmin(pvalues[idx_params.astype(int)])
# eval rhythm parameters
n_points = 1000
max_P = max(2*period1, 2*period2)
X_full = np.linspace(0, max_P, n_points)
X_fit_full = generate_independents_compare(X_full, X_full, n_components1 = n_components1, period1 = period1, n_components2 = n_components2, period2 = period2, lin_comp= lin_comp, remove_lin_comp=True)
H_i = X_fit_full[:,-1]
locs = H_i == 0
Y_fit_full1 = results.predict(X_fit_full[locs])
Y_fit_full2 = results.predict(X_fit_full[~locs])
# rhythm_params
rhythm_params1 = evaluate_rhythm_params(X_full, Y_fit_full1, period=period1)
rhythm_params2 = evaluate_rhythm_params(X_full, Y_fit_full2, period=period2)
rhythm_params = {'amplitude1': rhythm_params1['amplitude'],
'amplitude2': rhythm_params2['amplitude'],
'd_amplitude': rhythm_params2['amplitude']-rhythm_params1['amplitude'],
'acrophase1': rhythm_params1['acrophase'],
'acrophase2': rhythm_params2['acrophase'],
'd_acrophase': project_acr(rhythm_params2['acrophase']-rhythm_params1['acrophase']),
'mesor1': rhythm_params1['mesor'],
'mesor2': rhythm_params2['mesor'],
'd_mesor': rhythm_params2['mesor']-rhythm_params1['mesor']}
if additional_analysis == "CI1":
compare_pair_CI(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, samples_per_param_CI = samples_per_param_CI, max_samples_CI = max_samples_CI, sampling_type=sampling_type, rhythm_params=rhythm_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular)
elif additional_analysis == "bootstrap1":
compare_pair_bootstrap(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2=period2, rhythm_params=rhythm_params, bootstrap_size=bootstrap_size, bootstrap_type=bootstrap_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular)
elif additional_analysis == "CI2":
eval_params_diff_CI(X_full, X_fit_full, locs, results, rhythm_params = rhythm_params, samples_per_param = samples_per_param_CI, max_samples = max_samples_CI, k = len(X), sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period1=period1, period2=period2)
elif additional_analysis == "bootstrap2":
eval_params_diff_bootstrap(X, X_fit, X_full, X_fit_full, Y, model_type, locs, rhythm_params = rhythm_params, bootstrap_size = bootstrap_size, bootstrap_type = bootstrap_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period1=period1, period2=period2)
elif additional_analysis == "":
pass
else:
print("Invalid option")
if additional_analysis:
for param in parameters_to_analyse:
d_param = rhythm_params2[param] - rhythm_params1[param]
if param in parameters_angular:
d_param = project_acr(d_param)
rhythm_params[f'd_{param}'] = d_param
return (p_overall, p_params, p_f, results.params[idx_params], results, rhythm_params)
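# Example (a minimal sketch): compare_pair_df_extended returns a 6-tuple; the keys
# available in rhythm_params depend on the chosen additional_analysis:
#
#   p_overall, p_params, p_f, params, results, rhythm_params = \
#       compare_pair_df_extended(df, 'wt', 'ko', n_components=2, period=24,
#                                plot=False, additional_analysis="CI2")
#   print(p_f, rhythm_params['d_amplitude'])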
def plot_df_models(df, df_models, folder ="", **kwargs):
for row in df_models.iterrows():
test = row[1].test
n_components = row[1].n_components
period = row[1].period
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if folder:
save_to = os.path.join(folder,test+'_compnts='+str(n_components) +'_per=' + str(period))
else:
save_to = ""
fit_me(X, Y, n_components = n_components, period = period, name = test, save_to = save_to, plot=True, **kwargs)
"""
******************************
* start of analysis wrappers *
******************************
"""
# perform a more detailed analysis of the models that were identified as the best or most interesting in previous analyses
# analysis - options (from best to worst)
# - bootstrap
# - CI: analysis of confidence intervals of regression coefficients
def analyse_models(df, n_components = 3, period = 24, plot = False, folder = "", analysis = "bootstrap", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp = False, **kwargs):
params_CI = False
bootstrap = False
if analysis == "CI":
params_CI = True
elif analysis == "bootstrap":
bootstrap = True
else:
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject']#, 'amplitude', 'acrophase']
if not lin_comp:
parameters_to_analyse_ext = parameters_to_analyse
else:
parameters_to_analyse_ext = parameters_to_analyse + ['lin_comp']
for param in parameters_to_analyse_ext:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'{param}']
columns += [f'CI({param})', f'p({param})', f'q({param})']
df_results_extended = pd.DataFrame(columns = columns)
save_to = "" # for figures
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test in df.test.unique():
for per in period:
for n_comps in n_components:
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if plot and folder:
save_to = os.path.join(folder,test+'_compnts='+str(n_comps) +'_per=' + str(per))
_, statistics, rhythm_params, _, _ = fit_me(X, Y, n_components = n_comps, period = per, name = test, save_to = save_to, plot=plot, bootstrap=bootstrap, params_CI = params_CI, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, lin_comp = lin_comp, **kwargs)
#if sparse_output:
# row = dict(row[1][['test', 'per', 'n_comps', 'p', 'q', 'p_reject', 'q_reject', 'amplitude', 'acrophase', 'mesor']])
#else:
row = {'test': test,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'q': np.nan,
'p_reject': statistics['p_reject'],
'q_reject': np.nan,
'amplitude': rhythm_params['amplitude'],
'acrophase': rhythm_params['acrophase']}
for param in parameters_to_analyse_ext:
row[f'{param}'] = rhythm_params[f'{param}']
row[f'CI({param})'] = rhythm_params[f'CI({param})']
row[f'p({param})'] = rhythm_params[f'p({param})']
row[f'q({param})'] = np.nan
df_results_extended = pd.concat([df_results_extended, pd.DataFrame([row])], ignore_index=True, sort=False)
df_results_extended['q'] = multi.multipletests(df_results_extended['p'], method = 'fdr_bh')[1]
df_results_extended['q_reject'] = multi.multipletests(df_results_extended['p_reject'], method = 'fdr_bh')[1]
for param in parameters_to_analyse_ext:
df_results_extended[f'q({param})'] = multi.multipletests(df_results_extended[f'p({param})'], method = 'fdr_bh')[1]
return df_results_extended
# perform a more detailed analysis of the models that were identified as the best or most interesting in previous analyses
# analysis - options (from best to worst)
# - bootstrap
# - CI: analysis of confidence intervals of regression coefficients
def analyse_best_models(df, df_models, sparse_output = True, plot = False, folder = "", analysis = "bootstrap", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp = False, **kwargs):
params_CI = False
bootstrap = False
if analysis == "CI":
params_CI = True
elif analysis == "bootstrap":
bootstrap = True
else:
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject']
if not lin_comp:
parameters_to_analyse_ext = parameters_to_analyse
else:
parameters_to_analyse_ext = parameters_to_analyse + ['lin_comp']
for param in parameters_to_analyse_ext:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'{param}']
columns += [f'CI({param})', f'p({param})', f'q({param})']
df_results_extended = pd.DataFrame(columns = columns)
if sparse_output:
df_models = df_models[['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'amplitude', 'acrophase']]
save_to = "" # for figures
for row in df_models.iterrows():
test = row[1].test
n_components = row[1].n_components
period = row[1].period
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if plot and folder:
save_to = os.path.join(folder,test+'_compnts='+str(n_components) +'_per=' + str(period))
_, _, rhythm_params, _, _ = fit_me(X, Y, n_components = n_components, period = period, name = test, save_to = save_to, plot=plot, bootstrap=bootstrap, params_CI = params_CI, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
row = dict(row[1])
for param in parameters_to_analyse_ext:
row[f'{param}'] = rhythm_params[f'{param}']
row[f'CI({param})'] = rhythm_params[f'CI({param})']
row[f'p({param})'] = rhythm_params[f'p({param})']
row[f'q({param})'] = np.nan
df_results_extended = pd.concat([df_results_extended, pd.DataFrame([row])], ignore_index=True, sort=False)
for param in parameters_to_analyse_ext:
df_results_extended[f'q({param})'] = multi.multipletests(df_results_extended[f'p({param})'], method = 'fdr_bh')[1]
return df_results_extended
# perform a more detailed analysis of the models that were identified as the best or most interesting in previous analyses
# the only supported option is the CI analysis: analysis of confidence intervals of regression coefficients
def analyse_models_population(df, n_components = 3, period = 24, plot=False, folder = "", prefix="", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject']#, 'amplitude', 'acrophase']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'{param}']
columns += [f'CI({param})', f'p({param})', f'q({param})']
df_results_extended = pd.DataFrame(columns = columns)
save_to = "" # for figures
names = np.unique(df.test)
names = sorted({name.split('_rep')[0] for name in names})
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for name in names:
for n_comps in n_components:
for per in period:
df_pop = df[df.test.str.startswith(name)]
if plot and folder:
save_to=os.path.join(folder,prefix+name+'_compnts='+str(n_comps) +'_per=' + str(per))
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, plot = plot, save_to = save_to, params_CI = True, **kwargs)
row = {'test': name,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'q': np.nan,
'p_reject': statistics['p_reject'],
'q_reject': np.nan,
'amplitude': rhythm_params['amplitude'],
'acrophase': rhythm_params['acrophase']}
for param in parameters_to_analyse:
row[f'{param}'] = rhythm_params[f'{param}']
row[f'CI({param})'] = rhythm_params[f'CI({param})']
row[f'p({param})'] = rhythm_params[f'p({param})']
row[f'q({param})'] = np.nan
df_results_extended = pd.concat([df_results_extended, pd.DataFrame([row])], ignore_index=True, sort=False)
df_results_extended['q'] = multi.multipletests(df_results_extended['p'], method = 'fdr_bh')[1]
df_results_extended['q_reject'] = multi.multipletests(df_results_extended['p_reject'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results_extended[f'q({param})'] = multi.multipletests(df_results_extended[f'p({param})'], method = 'fdr_bh')[1]
return df_results_extended
# perform a more detailed analysis of the models that were identified as the best or most interesting in previous analyses
# the only supported option is the CI analysis: analysis of confidence intervals of regression coefficients
def analyse_best_models_population(df, df_models, sparse_output = True, plot=False, folder = "", prefix="", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject']#, 'amplitude', 'acrophase']
if 'lin_comp' in kwargs and kwargs['lin_comp'] and 'lin_comp' not in parameters_to_analyse:
parameters_to_analyse += ['lin_comp']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'{param}']
columns += [f'CI({param})', f'p({param})', f'q({param})']
df_results_extended = pd.DataFrame(columns = columns)
if sparse_output:
df_models = df_models[['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'amplitude', 'acrophase']]
save_to = "" # for figures
for row in df_models.iterrows():
name = row[1].test
n_comps = row[1].n_components
per = row[1].period
df_pop = df[df.test.str.startswith(name)]
if plot and folder:
save_to=os.path.join(folder,prefix+name+'_compnts='+str(n_comps) +'_per=' + str(per))
_, _, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, plot = plot, save_to = save_to, params_CI = True, **kwargs)
row = dict(row[1])
for param in parameters_to_analyse:
row[f'{param}'] = rhythm_params[f'{param}']
row[f'CI({param})'] = rhythm_params[f'CI({param})']
row[f'p({param})'] = rhythm_params[f'p({param})']
row[f'q({param})'] = np.nan
df_results_extended = pd.concat([df_results_extended, pd.DataFrame([row])], ignore_index=True, sort=False)
for param in parameters_to_analyse:
df_results_extended[f'q({param})'] = multi.multipletests(df_results_extended[f'p({param})'], method = 'fdr_bh')[1]
return df_results_extended
"""
****************************
* end of analysis wrappers *
****************************
"""
def plot_tuples_best_models(df, df_best_models, tuples, colors = ['black', 'red'], folder = '', **kwargs):
for T in tuples:
min_x = 1000
max_x = -1000
min_y = 1000
max_y = -1000
for test, color in zip(T, colors):
model = df_best_models[df_best_models["test"] == test].iloc[0]
n_components = model.n_components
period = model.period
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
min_x = min(min(X), min_x)
if 'plot_measurements' in kwargs and kwargs['plot_measurements'] == False:
max_x = max(max(X % period), max_x)
else:
max_x = max(max(X), max_x)
min_y = min(min(Y), min_y)
max_y = max(max(Y), max_y)
fit_me(X, Y, n_components = n_components, period = period, name = test, save_to = "", plot_residuals = False, hold=True, color = color, **kwargs)
plt.title(" + ".join(T))
plt.axis([min(min_x,0), max_x, 0.9*min_y, 1.1*max_y])
plt.legend()
if folder:
save_to = os.path.join(folder,"+".join(T)+"_"+str(period)+"_"+str(n_components))
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
else:
plt.show()
plt.close()
def plot_tuples_best_population(df, df_best_models, tuples, colors = ['black', 'red'], folder = '', **kwargs):
for T in tuples:
min_x = 1000
max_x = -1000
min_y = 1000
max_y = -1000
for test, color in zip(T, colors):
model = df_best_models[df_best_models["test"] == test].iloc[0]
n_components = model.n_components
period = model.period
df_pop = df[df.test.str.startswith(test)]
X, Y = np.array(df_pop.x), np.array(df_pop.y)
min_x = min(min(X), min_x)
if 'plot_measurements' in kwargs and kwargs['plot_measurements'] == False:
max_x = max(max(X % period), max_x)
else:
max_x = max(max(X), max_x)
min_y = min(min(Y), min_y)
max_y = max(max(Y), max_y)
population_fit(df_pop, n_components = n_components, period = period, save_to = "", hold=True, color = color, **kwargs)
plt.title(" + ".join(T))
plt.axis([min(min_x,0), max_x, 0.9*min_y, 1.1*max_y])
plt.legend()
if folder:
save_to = os.path.join(folder,"+".join(T)+"_"+str(period)+"_"+str(n_components))
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
else:
plt.show()
plt.close()
def plot_tuples_models(df, tuples, n_components = 2, period = 24, colors = ['black', 'red'], folder = '', **kwargs):
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for per in period:
for n_comps in n_components:
for T in tuples:
min_x = 1000
max_x = -1000
min_y = 1000
max_y = -1000
for test, color in zip(T, colors):
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
min_x = min(min(X), min_x)
if 'plot_measurements' in kwargs and kwargs['plot_measurements'] == False:
max_x = max(max(X % per), max_x)
else:
max_x = max(max(X), max_x)
min_y = min(min(Y), min_y)
max_y = max(max(Y), max_y)
fit_me(X, Y, n_components = n_comps, period = per, name = test, save_to = "", plot_residuals = False, hold=True, color = color, **kwargs)
plt.title(" + ".join(T))
plt.axis([min(min_x,0), max_x, 0.9*min_y, 1.1*max_y])
plt.legend()
if folder:
save_to = os.path.join(folder,"+".join(T)+"_"+str(per)+"_"+str(n_comps))
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
else:
plt.show()
plt.close()
def plot_tuples_population(df, tuples, n_components = 2, period = 24, colors = ['black', 'red'], folder = '', **kwargs):
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for per in period:
for n_comps in n_components:
for T in tuples:
min_x = 1000
max_x = -1000
min_y = 1000
max_y = -1000
for test, color in zip(T, colors):
df_pop = df[df.test.str.startswith(test)]
X, Y = np.array(df_pop.x), np.array(df_pop.y)
min_x = min(min(X), min_x)
if 'plot_measurements' in kwargs and kwargs['plot_measurements'] == False:
max_x = max(max(X % per), max_x)
else:
max_x = max(max(X), max_x)
min_y = min(min(Y), min_y)
max_y = max(max(Y), max_y)
population_fit(df_pop, n_components = n_comps, period = per, save_to = "", hold=True, color = color, **kwargs)
plt.title(" + ".join(T))
plt.axis([min(min_x,0), max_x, 0.9*min_y, 1.1*max_y])
plt.legend()
if folder:
save_to = os.path.join(folder,"+".join(T)+"_"+str(per)+"_"+str(n_comps))
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
else:
plt.show()
plt.close()
def plot_df_models_population(df, df_models, folder="", model_type="lin"):
for row in df_models.iterrows():
pop = row[1].test
n_components = row[1].n_components
period = row[1].period
#X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
df_pop = df[df.test.str.startswith(pop)]
if folder:
save_to = os.path.join(folder, pop+'_pop_compnts='+str(n_components) +'_per=' + str(period))
else:
save_to = ""
population_fit(df_pop, n_components = n_components, period = period, model_type = model_type, save_to = save_to)
def compare_models(RSS1, RSS2, DF1, DF2):
if DF2 < DF1:
F = ((RSS1 - RSS2)/(DF1 - DF2))/(RSS2/DF2)
return 1 - stats.f.cdf(F, DF1 - DF2, DF2)
else:
F = ((RSS2 - RSS1)/(DF2 - DF1))/(RSS1/DF1)
return 1 - stats.f.cdf(F, DF2 - DF1, DF1)
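# Worked example for compare_models (extra-sum-of-squares F test): a full model with
# RSS2=40 on DF2=90 degrees of freedom against a nested model with RSS1=50 on DF1=95:
#
#   F = ((50 - 40) / (95 - 90)) / (40 / 90) = 4.5
#   compare_models(50, 40, 95, 90)  # 1 - stats.f.cdf(4.5, 5, 90), roughly 0.001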
def ct_response(y, mu):
return ((y-mu)**2 - y) / mu
def ct_test(count, poiss_results):
mu = poiss_results.mu
y = count
ct = ct_response(y, mu)
ct_data=pd.DataFrame()
ct_data['ct_resp'] = ct
ct_data['mu'] = mu
ct_results = smf.ols('ct_resp ~ mu - 1', ct_data).fit()
alpha_ci95 = ct_results.conf_int(0.05).loc['mu']
print('\nC-T dispersion test: alpha = {:5.3f}, 95% CI = ({:5.3f}, {:5.3f})'.format(ct_results.params[0], alpha_ci95.loc[0], alpha_ci95.loc[1]))
alpha = ct_results.params[0]
return alpha
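# Example (a minimal sketch): ct_test implements the Cameron-Trivedi auxiliary
# regression ((y - mu)**2 - y) / mu ~ mu; an alpha clearly above zero indicates
# overdispersion, suggesting a negative binomial model instead of a Poisson one:
#
#   poiss_results = sm.GLM(Y, X_fit, family=sm.families.Poisson()).fit()
#   alpha = ct_test(Y, poiss_results)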
def compare_ANOVA(df, pairs, n_components = 3, period = 24):
# https://pythonfordatascience.org/anova-python/
# http://www.statistik.si/storitve/statisticne-analize-testi/anova-analiza-variance/
# https://www.youtube.com/watch?v=-yQb_ZJnFXw
P = []
for test1, test2 in pairs:
"""
df2 = df[(df['test'] == test1) | (df['test'] == test2)].copy()
df2['A'] = np.sin((df2['x']/period)*np.pi*2)
df2['B'] = np.cos((df2['x']/period)*np.pi*2)
if n_components >= 2:
df2['C'] = np.sin((df2['x']/(period/2))*np.pi*2)
df2['D'] = np.cos((df2['x']/(period/2))*np.pi*2)
if n_components >= 3:
df2['E'] = np.sin((df2['x']/(period/3))*np.pi*2)
df2['F'] = np.cos((df2['x']/(period/3))*np.pi*2)
"""
P.append(stats.f_oneway(df['y'][df['test'] == test1], df['y'][df['test'] == test2]).pvalue)
#results = smf.ols('y ~ test', data = df[(df['test'] == test1) | (df['test'] == test2)]).fit()
#print(results.summary())
return multi.multipletests(P, method = 'fdr_bh')[1]
#https://www.marsja.se/three-ways-to-carry-out-2-way-anova-with-python/
#https://pythonfordatascience.org/anova-2-way-n-way/
def compare_ANOVA2(df, pairs):
P = []
for test1, test2 in pairs:
data = df[(df['test'] == test1) | (df['test'] == test2)]
formula = 'y ~ x + test + x:test'
model = smf.ols(formula, data).fit()
aov_table = sm.stats.anova_lm(model, typ=2)
P.append(aov_table['PR(>F)']['x:test'])
#P.append(model.pvalues[-1])
return multi.multipletests(P, method = 'fdr_bh')[1]
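# Example (a minimal sketch): the interaction term x:test in the formula above
# captures whether the time dependence of y differs between the two groups; its
# PR(>F) entry is the p-value collected for each pair:
#
#   q_values = compare_ANOVA2(df, [('wt', 'ko')])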
#def test_phase(X1, Y1, X2, Y2, phase, period = 0, test1 = '', test2 = ''):
# X2 -= phase
# if period:
# X1 %= period
# X2 %= period
"""
Permutation test - does not work as well as it should.
Problem: when an individual is moved from the first population to
the second one, the rhythmicity of the permuted populations collapses.
N ... number of permutations (if omitted, all permutations are used)
Procedure:
- for each permutation...
-- build permuted population 1 (pop1_perm) and permuted population 2 (pop2_perm)
-- build a cosinor model for pop1_perm and pop2_perm
-- evaluate rhythmicity params for pop1_perm and pop2_perm
-- evaluate differences for rhythmicity params between pop1_perm and pop2_perm
-- add differences to a list
- calculate percentile score of the difference for rhythmicity params between population 1 and population 2
"""
"""
def permutation_test_population(df, pairs, period = 24, n_components = 2, lin_comp = False, model_type = 'lin', N = None):#, N=10=, permutations=[]):
df_results = pd.DataFrame(columns = ['pair', "d_amp", "p_d_amp", "d_acr", "p_d_acr", "d_mesor", "p_d_mesor"], dtype=float)
for pair in pairs:
df_pop1 = df[df.test.str.startswith(pair[0])]
df_pop2 = df[df.test.str.startswith(pair[1])]
_, statistics1, _, rhythm_params1, _ = population_fit(df_pop1, n_components = n_components, period = period, lin_comp= lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False)
_, statistics2, _, rhythm_params2, _ = population_fit(df_pop2, n_components = n_components, period = period, lin_comp= lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False)
p1, amplitude1, acrophase1, mesor1 = statistics1['p'], rhythm_params1['amplitude'], rhythm_params1['acrophase'], rhythm_params1['mesor']
p2, amplitude2, acrophase2, mesor2 = statistics2['p'], rhythm_params2['amplitude'], rhythm_params2['acrophase'], rhythm_params2['mesor']
#if p1 > 0.05 or p2 > 0.05:
# print(pair, "rhythmicity in one is not significant")
# continue
d_amp = abs(amplitude1 - amplitude2)
d_acr = abs(acrophase1 - acrophase2)
d_mesor = abs(mesor1 - mesor2)
amps, acrs, mesors = [], [], [] #[d_amp], [d_acr], [d_mesor]
tests1 = list(df_pop1.test.unique())
tests2 = list(df_pop2.test.unique())
#n_pop1 = len(tests1)
#n_pop2 = len(tests2)
#tests = np.array(tests1 + tests2)
permutations = generate_permutations_all(tests1, tests2)
if N:
permutations = np.array(list(permutations))
if N < len(permutations):
idxs = np.random.choice(np.arange(len(permutations)), size=N, replace=False)
permutations = permutations[idxs]
else:
idxs = np.random.choice(np.arange(len(permutations)), size=N, replace=True)
permutations = permutations[idxs]
#print(permutations)
for perm1, perm2 in permutations:
df_test1 = df[df.test.isin(perm1)]
df_test2 = df[df.test.isin(perm2)]
# could as well only permute the parameters of the models
_, statistics_test1, _, rhythm_params_test1, _ = population_fit(df_test1, n_components = n_components, period = period, lin_comp = lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False)
_, statistics_test2, _, rhythm_params_test2, _ = population_fit(df_test2, n_components = n_components, period = period, lin_comp = lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False)
p_test1, amplitude_test1, acrophase_test1, mesor_test1 = statistics_test1['p'], rhythm_params_test1['amplitude'], rhythm_params_test1['acrophase'], rhythm_params_test1['mesor']
p_test2, amplitude_test2, acrophase_test2, mesor_test2 = statistics_test2['p'], rhythm_params_test2['amplitude'], rhythm_params_test2['acrophase'], rhythm_params_test2['mesor']
if p_test1 <= 0.05 and p_test2 <= 0.05:
d_amp_test = abs(amplitude_test1 - amplitude_test2)
d_acr_test = abs(acrophase_test1 - acrophase_test2)
d_mesor_test = abs(mesor_test1 - mesor_test2)
else:
d_amp_test, d_acr_test, d_mesor_test = 0, 0, 0
amps.append(d_amp_test)
acrs.append(d_acr_test)
mesors.append(d_mesor_test)
p_d_amp = 1 - percentileofscore(amps, d_amp, 'rank')/100
p_d_acr = 1 - percentileofscore(acrs, d_acr, 'rank')/100
p_d_mesor = 1 - percentileofscore(mesors, d_mesor, 'rank')/100
d = {"pair": tuple(pair),
"d_amp": d_amp,
"p_d_amp": p_d_amp,
"d_acr": d_acr,
"p_d_acr": p_d_acr,
"d_mesor": d_mesor,
"p_d_mesor": p_d_mesor}
df_results = df_results.append(d, ignore_index=True)
return df_results
"""
"""
generate random permutations of two populations
"""
def generate_permutations(n_pop1, n_pop2, N):
n = n_pop1 + n_pop2
permutations = []
for _ in range(N):
R = np.random.permutation(n)
permutations.append((R[:n_pop1], R[n_pop1:]))
return permutations
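# Example: generate_permutations(2, 2, N=3) returns three random index splits of
# range(4), e.g. [(array([3, 0]), array([2, 1])), ...].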
"""
generate all possible permutations of two populations. Presumption: populations should be of equal sizes.
"""
def generate_permutations_all(pop1, pop2):
n_pop1 = len(pop1)
#n_pop2 = len(pop2)
permutations = set()
full = set(pop1 + pop2)
for i in range(1,n_pop1):
p1 = itertools.combinations(pop1,i)
p2 = itertools.combinations(pop2,n_pop1-i)
X = list(itertools.product(p1,p2))
# flatten
for i in range(len(X)):
X[i] = [a for b in X[i] for a in b]
for x in X:
x.sort()
y = list(set(full)-set(x))
y.sort()
z = [tuple(x), tuple(y)]
z.sort()
permutations.add(tuple(z))
return(permutations)
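# Example: for two populations of two individuals each, every balanced regrouping
# except the identity split is enumerated:
#
#   generate_permutations_all(['a1', 'a2'], ['b1', 'b2'])
#   # {(('a1', 'b1'), ('a2', 'b2')), (('a1', 'b2'), ('a2', 'b1'))}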
"""
* only approximate
* ideally, rhythm params would be calculated for each population from the population-mean cosinor
* here, we instead evaluate rhythm params as the means of the rhythm params of each individual
(which only approximately equals the rhythm params of the population-mean fit)
N ... number of permutations (if omitted, all permutations are used)
Procedure:
- for each permutation...
-- build permuted population 1 (pop1_perm) and permuted population 2 (pop2_perm)
-- calculate means of rhythmicity params for pop1_perm and pop2_perm
-- evaluate differences for rhythmicity params between pop1_perm and pop2_perm
-- add differences to a list
- calculate percentile score of the difference for rhythmicity params between population 1 and population 2
"""
def permutation_test_population_approx(df, pairs, period = 24, n_components = 2, n_components2 = None, period2 = None, N = None, parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):#, N=10=, permutations=[]):
n_components1 = n_components
period1 = period
if not n_components2:
n_components2 = n_components1
if not period2:
period2 = period1
columns = ['pair']
for param in parameters_to_analyse:
columns += [f'd_{param}', f'p(d_{param})']
df_results = pd.DataFrame(columns = columns, dtype=float)
for pair in pairs:
df_pop1 = df[df.test.str.startswith(pair[0])]
df_pop2 = df[df.test.str.startswith(pair[1])]
_, statistics1, _, _, _, ind_params1= population_fit(df_pop1, n_components = n_components1, period = period1, return_individual_params = True, **kwargs)#lin_comp= lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False, return_individual_params=True)
_, statistics2, _, _, _, ind_params2 = population_fit(df_pop2, n_components = n_components2, period = period2, return_individual_params = True, **kwargs)#lin_comp= lin_comp, model_type = model_type, plot = False, plot_measurements=False, plot_individuals=False, plot_margins=False, return_individual_params=True)
p1 = statistics1['p']
p2 = statistics2['p']
#if p1 > 0.05 or p2 > 0.05:
# print(pair, ": rhythmicity in one is not significant", sep="")
# continue
mean_params1 = {}
mean_params2 = {}
ind_params_all = {}
d_params = {}
d_params_permute = {}
# equations below only present an approximation
for param in parameters_to_analyse:
if param in parameters_angular:
mean_params1[param] = project_acr(circmean(ind_params1[param], high = 0, low = -2*np.pi))
mean_params2[param] = project_acr(circmean(ind_params2[param], high = 0, low = -2*np.pi))
d_params[param] = project_acr(mean_params2[param] - mean_params1[param])
else:
mean_params1[param] = np.mean(ind_params1[param])
mean_params2[param] = np.mean(ind_params2[param])
d_params[param] = mean_params2[param] - mean_params1[param]
ind_params_all[param] = np.append(ind_params1[param], ind_params2[param])
d_params_permute[param] = []
n1 = len(list(df_pop1.test.unique()))
n2 = len(list(df_pop2.test.unique()))
permutations = np.array(list(generate_permutations_all(list(range(n1)), list(range(n1,n1+n2)))))
if N:
if N < len(permutations):
idxs = np.random.choice(np.arange(len(permutations)), size=N, replace=False)
permutations = permutations[idxs]
else:
idxs = np.random.choice(np.arange(len(permutations)), size=N, replace=True)
permutations = permutations[idxs]
for perm1, perm2 in permutations:
perm1 = np.array(perm1)
perm2 = np.array(perm2)
for param in parameters_to_analyse:
if param in parameters_angular:
test1 = project_acr(circmean(ind_params_all[param][perm1], high = 0, low = -2*np.pi))
test2 = project_acr(circmean(ind_params_all[param][perm2], high = 0, low = -2*np.pi))
d_test = project_acr(test2 - test1)
else:
test1 = np.mean(ind_params_all[param][perm1])
test2 = np.mean(ind_params_all[param][perm2])
d_test = test2 - test1
d_params_permute[param].append(d_test)
p_d = {}
d = {"pair": tuple(pair)}
for param in parameters_to_analyse:
p_d[param] = 1 - percentileofscore(np.abs(d_params_permute[param]), np.abs(d_params[param]), 'rank')/100
d[f'd_{param}'] = d_params[param]
d[f'p(d_{param})'] = p_d[param]
df_results = pd.concat([df_results, pd.DataFrame([d])], ignore_index=True)
if len(pairs) == 1:
return d
else:
return df_results
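# Example usage (a minimal sketch; assumes replicate naming such as 'wt_rep1', so
# that df.test.str.startswith() groups replicates into populations):
#
#   res = permutation_test_population_approx(df, [('wt', 'ko')], period=24,
#                                            n_components=2, N=1000)
#   print(res['d_amplitude'], res['p(d_amplitude)'])  # a dict for a single pair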
# eval parameters using bootstrap
# bootstrap type should be set to either std (CI = X+-1.96*STD(X)) or percentile (CI = [2.5th percentile, 97.5th percentile])
def eval_params_bootstrap(X, X_fit, X_test, X_fit_eval_params, Y, model_type, rhythm_params, bootstrap_size=1000, bootstrap_type='std', t_test=True, parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], period=24):
# generate and evaluate bootstrap samples
params_bs = {}
for param in parameters_to_analyse:
params_bs[param] = np.zeros(bootstrap_size)
idxs = np.arange(len(X))
for i in range(bootstrap_size):
idxs_bs = np.random.choice(idxs, len(idxs), replace=True)
Y_bs, X_fit_bs = Y[idxs_bs], X_fit[idxs_bs]
if model_type == 'lin':
model_bs = sm.OLS(Y_bs, X_fit_bs)
results_bs = model_bs.fit()
## https://python.hotexamples.com/examples/statsmodels.genmod.generalized_linear_model/GLM/fit_constrained/python-glm-fit_constrained-method-examples.html
#model_bs = sm.GLM(Y_bs, X_fit_bs)
#constr = "const>-1"
#results_bs = model_bs.fit_constrained(constr)
elif model_type == 'poisson':
#model_bs = sm.GLM(Y_bs, X_fit_bs, family=sm.families.Poisson())
model_bs = statsmodels.discrete.discrete_model.Poisson(Y_bs, X_fit_bs)
results_bs = model_bs.fit(disp=0)
elif model_type =='gen_poisson':
#model_bs = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y_bs, X_fit_bs)
model_bs = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y_bs, X_fit_bs, p=1)
results_bs = model_bs.fit(disp=0)
elif model_type == 'nb':
#model_bs = sm.GLM(Y_bs, X_fit_bs, family=sm.families.NegativeBinomial(alpha=alpha))
model_bs = statsmodels.discrete.discrete_model.NegativeBinomialP(Y_bs, X_fit_bs, p=1)
results_bs = model_bs.fit(disp=0)
#Y_test_bs = results_bs.predict(X_fit_test)
Y_eval_params_bs = results_bs.predict(X_fit_eval_params)
rhythm_params_bs = evaluate_rhythm_params(X_test, Y_eval_params_bs, period=period)
"""
if rhythm_params_bs['amplitude'] > np.max(Y_eval_params_bs):
print(results_bs.summary())
plt.plot(X[idxs_bs], Y_bs,'.')
plt.plot(X_test, Y_eval_params_bs)
plt.show()
"""
# remove the fits that exhibit divergence
for param in parameters_to_analyse:
if (abs(rhythm_params_bs['amplitude']) > (np.max(Y)-np.min(Y))) or ((rhythm_params_bs['period2']) and (rhythm_params_bs['period2'] < rhythm_params_bs['period'])): # discard divergent fits: amplitude beyond the data range or a collapsed secondary period
params_bs[param][i] = np.nan
else:
#plt.plot(X_test, Y_eval_params_bs, alpha=0.5)
params_bs[param][i] = rhythm_params_bs[param]
#plt.show()
# analyse bootstrap samples
DoF = bootstrap_size - len(results_bs.params)
n_params = len(results_bs.params)
rhythm_params['DoF'] = DoF
for param in parameters_to_analyse:
if param in parameters_angular:
angular = True
else:
angular = False
sample_bs = params_bs[param]
mean, p_val, CI = bootstrap_statistics(sample_bs, angular=angular, bootstrap_type = bootstrap_type, t_test= t_test, n_params=n_params)
rhythm_params[f'{param}_bootstrap'] = mean
rhythm_params[f'CI({param})'] = CI
rhythm_params[f'p({param})'] = p_val
return rhythm_params
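# A minimal, self-contained sketch of the two bootstrap CI conventions referenced
# above (the actual aggregation is delegated to bootstrap_statistics, defined
# elsewhere in this module):
def _bootstrap_ci_sketch(sample, bootstrap_type='std'):
    sample = np.asarray(sample)
    sample = sample[~np.isnan(sample)]
    mean = np.mean(sample)
    if bootstrap_type == 'std':
        # normal approximation: CI = mean +- 1.96 * std
        dev = 1.96 * np.std(sample)
        return mean, (mean - dev, mean + dev)
    else:
        # percentile bootstrap: CI = [2.5th percentile, 97.5th percentile]
        return mean, (np.percentile(sample, 2.5), np.percentile(sample, 97.5))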
# eval rhythmicity parameter differences using bootstrap in a combination with limorhyde
# bootstrap type should be set to either std (CI = X+-1.96*STD(X)) or percentile (CI = [2.5th percentile, 97.5th percentile])
def eval_params_diff_bootstrap(X, X_fit, X_full, X_fit_full, Y, model_type, locs, rhythm_params, bootstrap_size, bootstrap_type, t_test=True, parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], period1=24, period2=24):
params_bs = {}
for param in parameters_to_analyse:
params_bs[param] = np.zeros(bootstrap_size)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 22:38:18 2020
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from matplotlib.lines import Line2D
import pickle
#Constants
kB = 1.3807e-16 #Boltzman's Constant in CGS
mp = 1.6726231e-24 #Mass of a Proton in CGS
GAMMA = 5./3 #Specific Heat Ratio for an Ideal Gas
fig = plt.figure(figsize=(30,30))
CHI = np.linspace(1.0,1000, 100000)
M1=0.5
M2=1.0
M3=1.5
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
#Problem Constants
mu = 0.672442
Tcl = 1.e4 #K
ncl = 0.1 # particles per cm^3
T_hot = CHI*Tcl
LAMBDA_HOT= LAMBDA(T_hot) #erg cm3 s-1 #LAMBDA at T_hot #GET IT FROM COOLTABLE.DAT
Tmix= np.sqrt(Tcl*T_hot) #K
LAMBDA_MIX = LAMBDA(Tmix) #erg cm3 s-1 #LAMBDA at T_mix #GET IT FROM COOLTABLE.DAT
ALPHA = 1.
n_hot=ncl/CHI
#Normalized Quantities
Tcl_4 = Tcl/1e4 #K
P3 = (ncl*Tcl)/1e3 #cm-3 K
CHI_100=(CHI)/100
LAMBDA_HOT_N23 = LAMBDA_HOT/1e-23 #erg cm3 s-1
LAMBDA_MIX_N21_4 = LAMBDA_MIX/(10**-21.4) #erg cm3 s-1
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
R1= (2 * (Tcl_4**(5/2)) * M1 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
R2= (2 * (Tcl_4**(5/2)) * M2 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
R3= (2 * (Tcl_4**(5/2)) * M3 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
pc=3.086e18 # cm per parsec
tcc1= (np.sqrt(CHI)*R1*pc)/(M1*cs_hot)
tcc2= (np.sqrt(CHI)*R2*pc)/(M2*cs_hot)
tcc3= (np.sqrt(CHI)*R3*pc)/(M3*cs_hot)
f1=0.9*((2*R1*(n_hot/0.01))**0.3)*((M1*(cs_hot/1.e7))**0.6)
f2=0.9*((2*R2*(n_hot/0.01))**0.3)*((M2*(cs_hot/1.e7))**0.6)
f3=0.9*((2*R3*(n_hot/0.01))**0.3)*((M3*(cs_hot/1.e7))**0.6)
t_life_pred1=10*tcc1*f1
t_life_pred2=10*tcc2*f2
t_life_pred3=10*tcc3*f3
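# Gronke-Oh-style survival estimate: a cloud is expected to survive and grow when
# its predicted lifetime t_life ~ 10 * f * t_cc (cloud-crushing time scaled by the
# empirical factor f above) exceeds the cooling time of the hot phase computed below.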
t_cool_hot=((1/(GAMMA-1))*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y1=np.log10(t_life_pred1/Myr)
Y2=np.log10(t_life_pred2/Myr)
Y3=np.log10(t_life_pred3/Myr)
plt.plot(X,Y1,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=0.5}$',linewidth=4.5)
plt.plot(X,Y2,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=1.0}$',linewidth=4.5, color='red')
plt.plot(X,Y3,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=1.5}$',linewidth=4.5, color='green')
############################################
data1=np.loadtxt('Li_pt_dest.dat')
X1=data1[:,0]
Y1=data1[:,1]
plt.plot(X1,Y1,'o', color='gray', markersize=30, label='Li Destroyed Clouds',alpha=0.5)
data1=np.loadtxt('Li_pt_grth.dat')
X1=data1[:,0]
Y1=data1[:,1]
plt.plot(X1,Y1,'^', color='gray', markersize=30, label='Li Growing Clouds', alpha=0.5)
#######################################################
############################################
M=0.5
R= [10.36,3.49]
pc=3.086e18 # cm per parsec
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:blue', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=0.5}$',fillstyle=filling, **marker_style)
#######################################################
M=1.0
R= [14.0,5.47]
pc=3.086e18 # cm per parsec
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:red', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=1.0}$',fillstyle=filling, **marker_style)
#############################################################
M=1.5
R= [17.0,7.16]
pc=3.086e18 # cm per parsec
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:green', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=1.5}$',fillstyle=filling, **marker_style)
#######################################################
M=0.5
R=[23.92,124.06]
pc=3.086e18 # cm per parsec
T_hot=3.e6
n_hot=0.1/300
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (17.32*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
import math
from collections import Counter, defaultdict
from datetime import datetime
from typing import List
import networkx as nx
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import pyproj
from numpy import mean, nanmean
from cognite.power.data_classes import PowerAssetList
# unified plotting colors
_MARKER_EDGE_COLOR = "rgb(85,150,210)"
_MARKER_FILL_COLOR = "rgb(230,230,230)"
# univeral transverse mercator zone 32 = south norway, germany
_LATLON_PROJ = "+proj=utm +zone=32, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
_PROJECTION = pyproj.Proj(_LATLON_PROJ, preserve_units=True)
def _latlon_to_xy(lat, lon):
# assumption: the module-level UTM projection defined above is the intended transform
(x, y) = _PROJECTION(lon, lat)
return (x, y)
def voltage_color(base_voltage: float):
color_map = [
(-1e9, "000000"),
(100, "000000"),
(132, "9ACA3C"),
(300, "20B3DE"),
(420, "ED1C24"),
(1e9, "ED1C24"),
]
color_map = [(v, tuple(int(h[i : i + 2], 16) for i in (0, 2, 4))) for v, h in color_map] # to rgb
ix_above = 0
while color_map[ix_above][0] < base_voltage:
ix_above += 1
t = (base_voltage - color_map[ix_above - 1][0]) / (color_map[ix_above][0] - color_map[ix_above - 1][0])
color = [
int(color_map[ix_above - 1][1][rgb] + t * (color_map[ix_above][1][rgb] - color_map[ix_above - 1][1][rgb]))
for rgb in range(3)
]
c = ",".join(map(str, color))
return f"rgb({c})"
def _flow_color(flow: float):
return voltage_color(base_voltage=flow)
def node_locations(power_area, interpolate_missing_positions=True):
node_loc = {
name: [
float(substation.metadata.get("PositionPoint.xPosition", math.nan)),
float(substation.metadata.get("PositionPoint.yPosition", math.nan)),
]
for name, substation in power_area._graph.nodes(data="object")
}
if interpolate_missing_positions:
orphan_count = 0
for it in range(2):
for s, loc in node_loc.items():
if math.isnan(loc[0]):
nb_locs = [
node_loc[n] for n in nx.neighbors(power_area._graph, s) if not math.isnan(node_loc[n][0])
]
mean_loc = [sum(c) / len(nb_locs) for c in zip(*nb_locs)]
if len(mean_loc) == 2:
node_loc[s] = mean_loc
elif it == 1:
node_loc[s] = [20, 55 + orphan_count] # TODO don't hardcode this
orphan_count += 1
return node_loc
def node_layout(power_area, position):
if position == "source":
node_positions = node_locations(power_area)
elif position == "project":
node_positions = {n: _latlon_to_xy(*xy) for n, xy in node_locations(power_area).items()}
elif position == "spring":
node_positions = nx.spring_layout(power_area._graph)
elif position == "kamada":
node_positions = nx.kamada_kawai_layout(power_area._graph)
else:
raise ValueError(f"Unknown layout {position}")
return node_positions
def create_substation_plot(node_locations, node_plot_mode):
text, x, y = zip(*[(k, v[0], v[1]) for k, v in node_locations.items()])
return go.Scatter(
x=x,
y=y,
text=text,
mode=node_plot_mode,
textposition="top center",
hoverinfo="text",
marker=dict(size=15, line=dict(color=_MARKER_EDGE_COLOR, width=2), color=_MARKER_FILL_COLOR),
)
def create_substation_map_plot(node_locations):
text, lon, lat = zip(*[(k, v[0], v[1]) for k, v in node_locations.items()])
# to get an edge color we plot the same data twice with difference marker size
plots = [
go.Scattermapbox(lat=lat, lon=lon, showlegend=False, marker=dict(size=17, color=_MARKER_EDGE_COLOR),),
go.Scattermapbox(
lat=lat,
lon=lon,
text=text,
mode="markers",
showlegend=False,
hoverinfo="text",
marker=dict(size=13, color=_MARKER_FILL_COLOR),
textposition="top center",
),
]
return plots
def edge_locations(power_area, node_locations):
# there is a gotcha here that having 100s of line plots is resource intensive, so making one for each
# ac line segment causes computers to catch fire. To get the coloring right we create one for each
# base voltage value, and then we split the line by adding nans. This makes the function unintuitive.
networkx_edges = power_area._graph.edges(data=True)
lons = defaultdict(list)
lats = defaultdict(list)
center_lons = defaultdict(list)
center_lats = defaultdict(list)
text = defaultdict(list)
counter = Counter([(edge[0], edge[1]) for edge in list(power_area._graph.edges(data=True))])
dups = {key: 1 for key in counter if counter[key] + counter[key[::-1]] == 2} # TODO: handle 3?
for acls in networkx_edges:
lon, lat = zip(*[node_locations[s] for s in acls[:2]])
center_lat = mean(lat)
center_lon = mean(lon)
if (acls[0], acls[1]) in dups:
# probably there are more elegant ways, but we want to offset the center in cases where there are multiple
# lines between two substations
lat_len = abs(lat[1] - lat[0])
lon_len = abs(lon[1] - lon[0])
edge_length = math.sqrt((lat_len) ** 2 + (lon_len) ** 2)
center_lat += 0.005 * dups[(acls[0], acls[1])] * lon_len / edge_length
center_lon += 0.005 * dups[(acls[0], acls[1])] * lat_len / edge_length
dups[(acls[0], acls[1])] *= -1
base_voltage = acls[2]["object"].metadata.get("BaseVoltage_nominalVoltage", "0")
lats[base_voltage] += [lat[0], center_lat, lat[1], math.nan]
lons[base_voltage] += [lon[0], center_lon, lon[1], math.nan]
center_lons[base_voltage].append(center_lon)
center_lats[base_voltage].append(center_lat)
text[base_voltage].append("{}: {} kV".format(acls[2]["object"].name, base_voltage))
return lats, lons, center_lats, center_lons, text
def create_line_segment_plot(x, y, center_x, center_y, text):
line_plots = [
go.Scatter(
x=x[base_voltage],
y=y[base_voltage],
line=dict(width=2, color=voltage_color(float(base_voltage)), shape="spline", smoothing=1.3),
hoverinfo="none",
mode="lines",
)
for base_voltage in x.keys()
]
center_plots = [
go.Scatter(
x=center_x[base_voltage],
y=center_y[base_voltage],
text=text[base_voltage],
mode="markers",
hoverinfo="text",
marker=dict(size=0.0001, color=voltage_color(float(base_voltage))),
)
for base_voltage in text.keys()
]
return line_plots, center_plots
def create_line_segment_map_plot(lats, lons, center_lats, center_lons, text):
line_plots = [
go.Scattermapbox(
mode="lines",
lon=lons[base_voltage],
lat=lats[base_voltage],
hoverinfo="none",
showlegend=False,
line=dict(color=voltage_color(float(base_voltage)), width=6),
)
for base_voltage in lats.keys()
]
center_plots = [
go.Scattermapbox(
lat=center_lats[base_voltage],
lon=center_lons[base_voltage],
text=text[base_voltage],
mode="markers",
showlegend=False,
hoverinfo="text",
marker=dict(size=0.0001, color=voltage_color(float(base_voltage))),
)
for base_voltage in text.keys()
]
return line_plots + center_plots
def _np_datetime_to_ms(np_datetime):
return np_datetime.astype("datetime64[ms]").astype("uint64")
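# Example: _np_datetime_to_ms(np.datetime64("1970-01-01T00:00:01")) == 1000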
class PowerPlot:
@staticmethod
def draw_with_map(power_area, height=None):
# plot substations
node_locs = node_locations(power_area, interpolate_missing_positions=False)
substation_plots = create_substation_map_plot(node_locs)
# plot ac line segments
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_locs)
ac_line_segment_plots = create_line_segment_map_plot(lats, lons, center_lats, center_lons, text)
center = nanmean([v for v in node_locs.values()], axis=0)
fig = go.Figure(
# ordering matters here: substations last so they are drawn on top
data=ac_line_segment_plots + substation_plots,
layout=go.Layout(
hovermode="closest",
mapbox_style="stamen-terrain",
margin={"r": 0, "t": 0, "l": 0, "b": 0},
height=height,
mapbox=dict(zoom=7, center=dict(lon=center[0], lat=center[1])),
),
)
return fig
@staticmethod
def draw(power_area, labels="fixed", position="kamada", height=None):
node_positions = node_layout(power_area, position)
node_plot_mode = "markers"
if labels == "fixed":
node_plot_mode += "+text"
# plot substations
substation_plot = create_substation_plot(node_positions, node_plot_mode)
# plot ac line segments
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_positions)
ac_line_segment_plots, ac_line_label_point_plot = create_line_segment_plot(
lons, lats, center_lons, center_lats, text
)
fig = go.Figure(
data=ac_line_segment_plots + ac_line_label_point_plot + [substation_plot],
layout=go.Layout(
height=height,
plot_bgcolor="rgb(250,250,250)",
titlefont_size=16,
showlegend=False,
hovermode="closest",
margin=dict(b=0, l=0, r=0, t=0),
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False, constrain="domain"),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False, scaleanchor="x"),
),
)
return fig
@staticmethod
def draw_flow(
power_area,
labels="fixed",
position="kamada",
height=None,
timeseries_type="estimated_value",
granularity="1h",
date: "np.datetime64" = None,
):
"""
Draws power flow through the area.
Args:
labels,position,height: as in `draw`
timeseries_type: type of time series to retrieve, i.e. value/estimated_value.
granularity: time step at which to average values over, as in the Python SDK `retrieve_dataframe` function.
date: datetime object at which to visualize the flow; None uses the current time.
"""
node_plot_mode = "markers"
if labels == "fixed":
node_plot_mode += "+text"
node_positions = node_layout(power_area, position)
substation_plot = create_substation_plot(node_positions, node_plot_mode)
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_positions)
ac_line_segment_plots, ac_line_label_point_plot = create_line_segment_plot(
lons, lats, center_lons, center_lats, text
)
terminals = PowerAssetList(
list(set(sum([list(data["terminals"].values()) for f, t, data in power_area._graph.edges(data=True)], []))),
cognite_client=power_area._cognite_client,
)
ts = terminals.time_series(measurement_type="ThreePhaseActivePower", timeseries_type=timeseries_type)
analogs = power_area._cognite_client.assets.retrieve_multiple(ids=[t.asset_id for t in ts])
terminal_ids: List[int] = [a.parent_id for a in analogs]
target_time = np.datetime64(date or datetime.now())
delta = np.timedelta64(5, "D")
start = _np_datetime_to_ms((target_time - delta))
end = _np_datetime_to_ms((target_time + delta))
df = power_area._cognite_client.datapoints.retrieve_dataframe(
id=[t.id for t in ts],
aggregates=["average"],
granularity=granularity,
start=start, # TODO: split data prep and update
end=end,
include_aggregate_name=False,
)
df.columns = terminal_ids
ix = np.searchsorted(df.index, target_time, side="left")
flow_values = df.iloc[ix - 1, :]
title = f"flow at {df.index[ix - 1]}"
distances = [
np.linalg.norm(np.array(node_positions[edge[0]]) - np.array(node_positions[edge[1]]))
for edge in power_area._graph.edges
]
global_arrow_scale = 0.15 * np.mean(distances) # TODO: what is reasonable here?
arrow_traces = []
for f, t, data in power_area._graph.edges(data=True):
terminal_map = data["terminals"]
terminals = [terminal_map[f], terminal_map[t]]
flow_values_t = []
for side in [0, 1]:
val = np.nan
if terminals[side].id in flow_values.index:
val = flow_values[terminals[side].id]
if isinstance(val, pd.Series):
val = val.dropna()
val = val.mean() if not val.empty else np.nan
flow_values_t.append(val)
from_pos = np.array(node_positions[f])
to_pos = np.array(node_positions[t])
from_to_vec = to_pos - from_pos
distance = np.linalg.norm(from_to_vec)
arrow_scale = min(global_arrow_scale, 0.3 * distance)
from_to_vec /= max(distance, 0.1)
if flow_values_t[0] < flow_values_t[1]:
flow_vec = -from_to_vec
else:
flow_vec = from_to_vec
orthogonal = np.array([-flow_vec[1], flow_vec[0]])
mid = (from_pos + to_pos) / 2
sign_from = math.copysign(1, flow_values_t[0]) if not np.isnan(flow_values_t[0]) else 0
arrow_from_mid = mid - 0.5 * arrow_scale * from_to_vec # arrow middle is always closer to from
# direction of arrow depends on sign of flow
arrow_from_tail = arrow_from_mid - 0.33 * arrow_scale * flow_vec * sign_from
arrow_from_head = arrow_from_mid + 0.33 * arrow_scale * flow_vec * sign_from
arrow_from_left = arrow_from_tail - orthogonal * global_arrow_scale * 0.5
arrow_from_right = arrow_from_tail + orthogonal * global_arrow_scale * 0.5
            sign_to = math.copysign(1, flow_values_t[1]) if not np.isnan(flow_values_t[1]) else 0
from src.test_functions.test_function import Function
import numpy as np
class Zakharov(Function):
"""
https://www.sfu.ca/~ssurjano/zakharov.html
"""
def minimal_value(self, nb_dimensions):
return 0.0
def fitness_function_implementation(self, x, axis=1):
        x = np.asarray(x)
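        # A sketch of the remaining body, following the Zakharov definition at
        # the URL above (an assumption; the original body is not shown):
        #   d = x.shape[axis]
        #   s = np.sum(0.5 * np.arange(1, d + 1) * x, axis=axis)
        #   return np.sum(x ** 2, axis=axis) + s ** 2 + s ** 4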
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
# Don't delete this line, even if PyCharm says it's an unused import.
# It is required for projection='3d' in add_subplot()
from mpl_toolkits.mplot3d import Axes3D
def rand_rotation_matrix(deflection=1.0, seed=None):
"""Creates a random rotation matrix.
    Args:
        deflection: the magnitude of the rotation. For 0, no rotation; for 1,
                    completely random rotation. Small deflection => small
                    perturbation.
        seed: optional seed for numpy's random generator, for reproducible rotations.
    References:
        http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c
        http://blog.lostinmyterminal.com/python/2015/05/12/random-rotation-matrix.html
"""
if seed is not None:
np.random.seed(seed)
theta, phi, z = np.random.uniform(size=(3,))
theta = theta * 2.0 * deflection * np.pi # Rotation about the pole (Z).
phi = phi * 2.0 * np.pi # For direction of pole deflection.
z = z * 2.0 * deflection # For magnitude of pole deflection.
# Compute a vector V used for distributing points over the sphere
# via the reflection I - V Transpose(V). This formulation of V
# will guarantee that if x[1] and x[2] are uniformly distributed,
# the reflected points will be uniform on the sphere. Note that V
# has length sqrt(2) to eliminate the 2 in the Householder matrix.
r = np.sqrt(z)
V = (np.sin(phi) * r,
np.cos(phi) * r,
np.sqrt(2.0 - z))
st = np.sin(theta)
ct = np.cos(theta)
R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
# Construct the rotation matrix ( V Transpose(V) - I ) R.
M = (np.outer(V, V) - np.eye(3)).dot(R)
return M
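# Sanity-check sketch (not from the source): a proper rotation matrix is
# orthogonal with determinant +1, so for any seed:
#   R = rand_rotation_matrix(seed=0)
#   assert np.allclose(R.T @ R, np.eye(3))
#   assert np.isclose(np.linalg.det(R), 1.0)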
def add_gaussian_noise_to_pcloud(pcloud, mu=0, sigma=1):
    gnoise = np.random.normal(mu, sigma, pcloud.shape[0])
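    # A plausible completion (an assumption; the original body is not shown):
    # broadcast the per-point noise over all three coordinates and return the
    # noisy cloud.
    #   gnoise = np.tile(gnoise, (3, 1)).T
    #   pcloud += gnoise
    #   return pcloud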
from comancpipeline.Analysis import BaseClasses
from comancpipeline.Tools import WCS, Coordinates, Filtering, Fitting, Types
from comancpipeline.Tools.WCS import DefineWCS
from comancpipeline.Tools.WCS import ang2pix
from comancpipeline.Tools.WCS import ang2pixWCS
import h5py
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sys
from astropy.wcs import WCS
import os
import argparse
import configparser
from scipy.ndimage.filters import median_filter
from scipy.ndimage.filters import gaussian_filter,maximum_filter
from matplotlib import pyplot
class SimpleMap(object):
def __init__(self, xpix=100, ypix=100, cdeltx=1, cdelty=1, ctypex='RA---TAN', ctypey='DEC--TAN', x0=0, y0=0, filtertod=False, lon=0, lat=0):
self.xpix = int(xpix)
self.ypix = int(ypix)
self.cdeltx = float(cdeltx)/60.
self.cdelty = float(cdelty)/60.
self.x0 = int(x0)
self.y0 = int(y0)
self.ctype=[ctypex, ctypey]
self.naxis = [self.xpix, self.ypix]
self.cdelt = [self.cdeltx, self.cdelty]
self.crval = [self.x0, self.y0]
self.lon = lon
self.lat = lat
self.filtertod = filtertod
def __call__(self, container, filename=''):
self.run(container)
self.filename = filename
def run(self,container):
tod = container.getdset('spectrometer/tod')
ra = container.getdset('spectrometer/pixel_pointing/pixel_ra')
dec = container.getdset('spectrometer/pixel_pointing/pixel_dec')
mjd = container.getdset('spectrometer/MJD')
el = container.getdset('spectrometer/pixel_pointing/pixel_el')
#create wcs
self.crval = [np.nanmedian(ra), np.nanmedian(dec)]
self.wcs,_,_ = DefineWCS(naxis=self.naxis,
cdelt=self.cdelt,
crval=self.crval,
ctype=self.ctype)
maps = self.MakeMap(tod, ra, dec, mjd, el)
noisemap = self.MakeNoiseMap(tod, ra, dec, mjd, el)
#save wcs info
#container.setExtrasData('Mapping/WCS',
# self.wcs,
# [Types._OTHER_])
#save map
container.setExtrasData('Mapping/SimpleMaps',
maps,
[Types._HORNS_,
Types._SIDEBANDS_,
Types._FREQUENCY_,
Types._OTHER_, Types._OTHER_])
#save noise map
container.setExtrasData('Mapping/NoiseMaps',
noisemap,
[Types._HORNS_,
Types._SIDEBANDS_,
Types._FREQUENCY_,
Types._OTHER_, Types._OTHER_])
def initialPeak(self,tod, x, y):
rms = Filtering.calcRMS(tod)
r = np.sqrt((x)**2 + (y)**2)
close = (r < 10)
tod -= Filtering.estimateBackground(tod, rms, close)
dx, dy = 1./60., 1./60.
Dx, Dy = 1., 1.
npix = int(Dx/dx)
xpix, ypix = np.arange(npix+1), np.arange(npix+1)
xpix = xpix*dx - Dx/2.
ypix = ypix*dy - Dy/2.
m = np.histogram2d(x, y, xpix, weights=tod)[0]/np.histogram2d(x, y, xpix)[0]
m = median_filter(m, 3)
xmax,ymax = np.unravel_index(np.nanargmax(m),m.shape)
return xpix[xmax], ypix[ymax]
def MakeMap(self, tod, ra, dec, mjd, el):
#takes a 1D tod array and makes a simple map
#produce arrays for mapping
npix = self.naxis[0]*self.naxis[1]
pixbins = np.arange(0, npix+1).astype(int)
nHorns, nSBs, nChans, nSamples = tod.shape
rms = Filtering.calcRMS(tod)
maps = np.zeros((nHorns, nSBs, nChans, self.naxis[0], self.naxis[1]))
for i in range(nHorns):
good = (np.isnan(ra[i,:]) == False) & (np.isnan(tod[i,0,0]) == False)
pa = Coordinates.pa(ra[i,good], dec[i,good], mjd[good], self.lon, self.lat)
x, y = Coordinates.Rotate(ra[i,good], dec[i,good], self.crval[0], self.crval[1], -pa)
nbins = 10
xbins = np.linspace(np.min(x),np.max(x), nbins+1)
xmids = (xbins[1:] + xbins[:-1])/2.
xbw, _ = np.histogram(x,xbins)
ybw, _ = np.histogram(y,xbins)
todAvg = np.nanmean(np.nanmean(tod[i,...],axis=0),axis=0)
fitx0, fity0 = self.initialPeak(todAvg[good], x, y)
r = np.sqrt((x-fitx0)**2 + (y-fity0)**2)
close = (r < 6./60.)
pix = ang2pixWCS(self.wcs, ra[i,good], dec[i,good]).astype('int')
mask = np.where((pix != -1))[0]
h, b = np.histogram(pix, pixbins, weights=(pix != -1).astype(float))
self.hits = np.reshape(h, (self.naxis[0], self.naxis[1]))
for j in range(nSBs):
for k in range(nChans):
todmap = tod[i,j,k,good]
if self.filtertod:
txbw, _ = np.histogram(x,xbins, weights=todmap)
tybw, _ = np.histogram(y,xbins, weights=todmap)
fb = txbw/xbw
gd = np.isfinite(fb)
pmdl = np.poly1d(np.polyfit(xmids[gd],fb[gd],1))
todmap -= pmdl(x)
fb = tybw/ybw
gd = np.isfinite(fb)
pmdl = np.poly1d(np.polyfit(xmids[gd],fb[gd],1))
todmap -= pmdl(y)
w, b = np.histogram(pix[mask], pixbins, weights=todmap[mask])
# w, b = np.histogram(pix[:], pixbins, weights=tod[i,j,k,:])
m = np.reshape(w, (self.naxis[0], self.naxis[1]))
maps[i,j,k,...] = m/self.hits
pyplot.subplot(projection=self.wcs)
pyplot.imshow(maps[0,0,0,:,:])
pyplot.show()
return maps
def MakeNoiseMap(self, tod, ra, dec, mjd, el):
#takes a 1D tod array and makes a simple noise map
#produce arrays for mapping
npix = self.naxis[0]*self.naxis[1]
pixbins = np.arange(0, npix+1).astype(int)
rms = Filtering.calcRMS(tod)
#get noise rms and associated ra and dec
noise, ranew, dnew, mjdnew = Filtering.noiseProperties(tod,ra,dec,mjd)
nHorns, nSBs, nChans, nSamples = noise.shape
maps = np.zeros((nHorns, nSBs, nChans, self.naxis[0], self.naxis[1]))
for i in range(nHorns):
good = (np.isnan(ranew[i,:]) == False) & (np.isnan(noise[i,0,0]) == False)
pa = Coordinates.pa(ranew[i,good], dnew[i,good], mjdnew[good], self.lon, self.lat)
x, y = Coordinates.Rotate(ranew[i,good], dnew[i,good], self.crval[0], self.crval[1], -pa)
nbins = 10
xbins = np.linspace(np.min(x),np.max(x), nbins+1)
xmids = (xbins[1:] + xbins[:-1])/2.
            xbw, _ = np.histogram(x,xbins)
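            # By analogy with MakeMap above (an assumption; names below are
            # illustrative), the remainder would histogram the noise values per
            # pixel and normalise by the hit count, e.g.
            #   w, b = np.histogram(pix[mask], pixbins, weights=noise_tod[mask])
            #   maps[i,j,k,...] = np.reshape(w, (self.naxis[0], self.naxis[1])) / self.hits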
import numpy as np
import pandas as pd
import scipy.stats as si
'''
This section is highly dependent upon knowledge of the Black & Scholes formula
for option pricing and using Monte Carlo methods to price options. There are
a number of terms such as d1, d2, delta, gamma, vega that are specific to
option pricing and I will not add comments to explain what these are. If you
are unfamiliar with this, read something like 'Options, Futures and Other
Derivatives' by <NAME>.
Note however that I use numpy arrays here, so when a calculation is performed,
I am often calculating multiple values at the same time. I assume an input
array containing multiple stock prices is passed in, which results in multiple
price, delta, gamma etc. values being calculated and which will later be used
to plot graphs.
This module has two classes:
BlackScholes:
This calculates the price, delta, gamma etc of an option using the B&S Formula
BasicMonteCarloOption:
This calculates the price, delta, gamma etc by using monte carlo methods.
With this class I tend to return two arguments (not one) from the functions.
The second argument tends to be the standard deviation. So I may have
(optPrice, optStdDev) = calculateSomeValue( numpyArrayOfStockPrices )
This section is only for European Options and it does not include things such
as interest rate curves, borrow curves, volatility surface etc etc.
(ie it is a simplified version)
'''
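# Usage sketch (assumed; the strike/vol/rate/expiry values are illustrative):
#   spots = np.linspace(80.0, 120.0, 5)
#   bs = BlackScholes(100.0, 0.2, 0.05, 1.0, boolIsCall=True)
#   bsPrices = bs.getOptionPrice(spots)                    # one price per spot
#   mc = BasicMonteCarloOption(100.0, 0.2, 0.05, 1.0, True, intNoIter=100000)
#   (mcPrices, mcStdDev) = mc.getOptionPrice(spots)        # price and std dev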
class BlackScholes():
# Private Functions
def __init__(self, fltStrike, fltVol, fltRiskFreeRate, fltTimeToMaturity,
boolIsCall):
# Set the variables
self.__fltStrike = fltStrike
self.__fltVol = fltVol
self.__fltRiskFreeRate = fltRiskFreeRate
self.__fltTimeToMaturity = fltTimeToMaturity
self.__boolIsCall = boolIsCall
def __str__(self):
strF = 'EuropeanOption: [Strike:{strike}; Vol:{vol}; '\
'RFRate:{rfrate}; Time:{time}; IsCall:{iscall};]'
return strF.format(strike=self.__fltStrike,
vol=self.__fltVol,
rfrate=self.__fltRiskFreeRate,
time=self.__fltTimeToMaturity,
iscall=self.__boolIsCall)
def __getD1(self, npStock):
npSK = np.log(npStock / self.__fltStrike)
npTopD1 = npSK + (
self.__fltRiskFreeRate
+ (self.__fltVol ** 2) / 2
) * self.__fltTimeToMaturity
npD1 = npTopD1 / (self.__fltVol * np.sqrt(self.__fltTimeToMaturity))
return npD1
def __getD2(self, npStock):
npD1 = self.__getD1(npStock)
npD2 = npD1 - (self.__fltVol * np.sqrt(self.__fltTimeToMaturity))
return npD2
def __getD2FromD1(self, npD1):
npD2 = npD1 - (self.__fltVol * np.sqrt(self.__fltTimeToMaturity))
return npD2
def __getCallPrice(self, npStock):
npD1 = self.__getD1(npStock)
npD2 = self.__getD2FromD1(npD1)
npCall = npStock * si.norm.cdf(npD1)\
- (self.__fltStrike
* np.exp(-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
* si.norm.cdf(npD2))
return npCall
def __getCallDelta(self, npStock):
npD1 = self.__getD1(npStock)
npDelta = si.norm.cdf(npD1)
return npDelta
def __getCallTheta(self, npStock):
npD1 = self.__getD1(npStock)
npD2 = self.__getD2FromD1(npD1)
npArg1 = -(npStock * si.norm.pdf(npD1) * self.__fltVol) \
/ (2 * np.sqrt(self.__fltTimeToMaturity))
npArg2 = -self.__fltRiskFreeRate * self.__fltStrike * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity) \
* si.norm.cdf(npD2)
npTheta = (npArg1 + npArg2) / 365
return npTheta
def __getCallRho(self, npStock):
npD2 = self.__getD2(npStock)
npRho = (self.__fltStrike * self.__fltTimeToMaturity * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
* si.norm.cdf(npD2)) * 0.01
return npRho
def __getPutPrice(self, npStock):
npD1 = self.__getD1(npStock)
npD2 = self.__getD2FromD1(npD1)
npPut = self.__fltStrike * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity) \
* si.norm.cdf(-npD2) - npStock * si.norm.cdf(-npD1)
return npPut
def __getPutDelta(self, npStock):
npD1 = self.__getD1(npStock)
npDelta = (si.norm.cdf(npD1) - 1)
return npDelta
def __getPutTheta(self, npStock):
npD1 = self.__getD1(npStock)
npD2 = self.__getD2FromD1(npD1)
npArg1 = -(npStock * si.norm.pdf(npD1) * self.__fltVol) \
/ (2 * np.sqrt(self.__fltTimeToMaturity))
npArg2 = self.__fltRiskFreeRate * self.__fltStrike * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity) \
* si.norm.cdf(-npD2)
npTheta = (npArg1 + npArg2) / 365
return npTheta
def __getPutRho(self, npStock):
npD2 = self.__getD2(npStock)
npRho = (- self.__fltStrike * self.__fltTimeToMaturity * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
* si.norm.cdf(-npD2)) * 0.01
return npRho
# Public Functions
def getOptionPrice(self, npStock):
if self.__boolIsCall:
return self.__getCallPrice(npStock)
else:
return self.__getPutPrice(npStock)
def getOptionDelta(self, npStock):
if self.__boolIsCall:
return self.__getCallDelta(npStock)
else:
return self.__getPutDelta(npStock)
def getOptionGamma(self, npStock):
# Gamma is Call/Put independent
npD1 = self.__getD1(npStock)
n1 = (si.norm.pdf(npD1))
d1 = (npStock * self.__fltVol * np.sqrt(self.__fltTimeToMaturity))
npGamma = n1 / d1
return npGamma
def getOptionVega(self, npStock):
# Vega is Call/Put independent
npD1 = self.__getD1(npStock)
npVega = npStock * (si.norm.pdf(npD1)) \
* np.sqrt(self.__fltTimeToMaturity) / 100
return npVega
def getOptionTheta(self, npStock):
if self.__boolIsCall:
return self.__getCallTheta(npStock)
else:
return self.__getPutTheta(npStock)
def getOptionRho(self, npStock):
if self.__boolIsCall:
return self.__getCallRho(npStock)
else:
return self.__getPutRho(npStock)
class BasicMonteCarloOption():
# Private Functions
def __init__(self, fltStrike, fltVol, fltRiskFreeRate, fltTimeToMaturity,
boolIsCall, intNoIter):
self.__fltStrike = fltStrike
self.__fltVol = fltVol
self.__fltRiskFreeRate = fltRiskFreeRate
self.__fltTimeToMaturity = fltTimeToMaturity
self.__boolIsCall = boolIsCall
self.__intNoIter = intNoIter
def __str__(self):
strF = 'BasicMonteCarloOption: [Strike:{strike}; Vol:{vol}; ' \
'RFRate:{rfrate}; Time:{time}; IsCall:{iscall}; ' \
'NoIter:{noiter}]'
return strF.format(strike=self.__fltStrike, vol=self.__fltVol,
rfrate=self.__fltRiskFreeRate,
time=self.__fltTimeToMaturity,
iscall=self.__boolIsCall,
noiter=self.__intNoIter)
def getOptionPrice(self, npStock):
# Get the random numbers
Z = np.random.standard_normal((1, self.__intNoIter))
# Now get the multipliers to find the final stock price
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (self.__fltRiskFreeRate - 0.5 * self.__fltVol ** 2) \
* self.__fltTimeToMaturity
Mult = np.exp(a1 + a2)
        # For every stock price, get self.__intNoIter final stock prices by
        # doing a matrix multiplication. We multiply the initial stock price
        # by the multipliers to get the final stock price. I do need to change
        # the stocks to a matrix to achieve this.
npMatrix = npStock.copy()
npMatrix = np.reshape(npMatrix, (len(npStock), -1))
FinalS = np.matmul(npMatrix, Mult)
# Calculate the payoff
if self.__boolIsCall:
npPayoff = FinalS - self.__fltStrike
else:
npPayoff = self.__fltStrike - FinalS
        # Build a matrix of zeros the same size as the payoff matrix.
        npZeros = np.zeros(npPayoff.shape)
        # Build a matrix of adjusted payoffs, where the P&L is floored at zero.
npPayoffAdj = np.maximum(npPayoff, npZeros)
# Get the present value of the monte carlo simulations
npPV = npPayoffAdj * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
# Calculate the mean for each axis.
npPrice = np.mean(npPV, axis=1)
# Calculate the stdev for each axis.
npSTD = np.std(npPV, axis=1)
# Return the option price.
return (npPrice, npSTD)
def getOptionDelta(self, npStock):
# Get the random numbers
Z = np.random.standard_normal((1, self.__intNoIter))
# Now get the multipliers to find the final stock price
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (self.__fltRiskFreeRate - 0.5 * self.__fltVol ** 2) \
* self.__fltTimeToMaturity
Mult = np.exp(a1 + a2)
        # For every stock price, get self.__intNoIter final stock prices by
        # doing a matrix multiplication. We multiply the initial stock price
        # by the multipliers to get the final stock price. I do need to change
        # the stocks to a matrix to achieve this.
npMatrix = npStock.copy()
npMatrix = np.reshape(npMatrix, (len(npStock), -1))
FinalS = np.matmul(npMatrix, Mult)
# Get a bumped stockprice and then calculate the final stockprice
npBump = npMatrix * 0.01
FinalSBump = np.matmul(npMatrix + npBump, Mult)
# Calculate the payoff
if self.__boolIsCall:
npPayoff = FinalS - self.__fltStrike
npPayoffBump = FinalSBump - self.__fltStrike
else:
npPayoff = self.__fltStrike - FinalS
npPayoffBump = self.__fltStrike - FinalSBump
        # Build a matrix of zeros the same size as the payoff matrix.
        npZeros = np.zeros(npPayoff.shape)
        # Build a matrix of adjusted payoffs, where the P&L is floored at zero.
npPayoffAdj = np.maximum(npPayoff, npZeros)
npPayoffAdjBump = np.maximum(npPayoffBump, npZeros)
# Get the present value of the monte carlo simulations
npPV = npPayoffAdj * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
npPVBump = npPayoffAdjBump * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
# Calculate the delta
npAllDelta = (npPVBump - npPV) / npBump
# Calculate the mean for each axis.
npDelta = np.mean(npAllDelta, axis=1)
# Calculate the stdev for each axis.
npDeltaSTD = np.std(npAllDelta, axis=1)
# Return the option price.
return (npDelta, npDeltaSTD)
def getOptionRho(self, npStock):
# Get the random numbers
Z = np.random.standard_normal((1, self.__intNoIter))
fltBump = 0.0001
fltRiskFreeRateBump = self.__fltRiskFreeRate + fltBump
# Now get the multipliers to find the final stock price
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (self.__fltRiskFreeRate - 0.5 * self.__fltVol ** 2) \
* self.__fltTimeToMaturity
Mult = np.exp(a1 + a2)
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (fltRiskFreeRateBump - 0.5 * self.__fltVol ** 2) \
* self.__fltTimeToMaturity
MultBump = np.exp(a1 + a2)
        # For every stock price, get self.__intNoIter final stock prices by
        # doing a matrix multiplication. We multiply the initial stock
        # price by the multipliers to get the final stock
        # price.
npMatrix = npStock.copy()
npMatrix = np.reshape(npMatrix, (len(npStock), -1))
FinalS = np.matmul(npMatrix, Mult)
# Get a bumped stockprice and then calculate the final stockprice
FinalSBump = np.matmul(npMatrix, MultBump)
# Calculate the payoff
if self.__boolIsCall:
npPayoff = FinalS - self.__fltStrike
npPayoffBump = FinalSBump - self.__fltStrike
else:
npPayoff = self.__fltStrike - FinalS
npPayoffBump = self.__fltStrike - FinalSBump
        # Build a matrix of zeros the same size as the payoff matrix.
        npZeros = np.zeros(npPayoff.shape)
        # Build a matrix of adjusted payoffs, where the P&L is floored at zero.
npPayoffAdj = np.maximum(npPayoff, npZeros)
npPayoffAdjBump = np.maximum(npPayoffBump, npZeros)
# Get the present value of the monte carlo simulations
npPV = npPayoffAdj * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
npPVBump = npPayoffAdjBump * np.exp(
-fltRiskFreeRateBump * self.__fltTimeToMaturity)
# Calculate the delta
npAllRho = (npPVBump - npPV) * (0.01 / fltBump)
# Calculate the mean for each axis.
npRho = np.mean(npAllRho, axis=1)
# Calculate the stdev for each axis.
npRhoSTD = np.std(npAllRho, axis=1)
# Return the option price.
return (npRho, npRhoSTD)
def getOptionGamma(self, npStock):
# Note the gamma may become unstable, see the following:
# https://quant.stackexchange.com/questions/18208/
# greeks-why-does-my-monte-carlo-give-correct-delta-but-incorrect-gamma
# Get the random numbers
Z = np.random.standard_normal((1, self.__intNoIter))
# Now get the multipliers to find the final stock price
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (self.__fltRiskFreeRate - 0.5 * self.__fltVol ** 2) \
* self.__fltTimeToMaturity
Mult = np.exp(a1 + a2)
        # For every stock price, get self.__intNoIter final stock prices by
        # doing a matrix multiplication. We multiply the initial stock
        # price by the multipliers to get the final stock
        # price.
npMatrix = npStock.copy()
npMatrix = np.reshape(npMatrix, (len(npStock), -1))
FinalS = np.matmul(npMatrix, Mult)
# Get a bumped stockprice and then calculate the final stockprice
npBump = npMatrix * 0.01
FinalSBumpPlus = np.matmul((npMatrix + npBump), Mult)
FinalSBumpMinus = np.matmul((npMatrix - npBump), Mult)
# Calculate the payoff
if self.__boolIsCall:
npPayoff = FinalS - self.__fltStrike
npPayoffBumpPlus = FinalSBumpPlus - self.__fltStrike
npPayoffBumpMinus = FinalSBumpMinus - self.__fltStrike
else:
npPayoff = self.__fltStrike - FinalS
npPayoffBumpPlus = self.__fltStrike - FinalSBumpPlus
npPayoffBumpMinus = self.__fltStrike - FinalSBumpMinus
        # Build a matrix of zeros the same size as the payoff matrix.
        npZeros = np.zeros(npPayoff.shape)
        # Build a matrix of adjusted payoffs, where the P&L is floored at zero.
npPayoffAdj = np.maximum(npPayoff, npZeros)
npPayoffAdjBumpPlus = np.maximum(npPayoffBumpPlus, npZeros)
npPayoffAdjBumpMinus = np.maximum(npPayoffBumpMinus, npZeros)
# Get the present value of the monte carlo simulations
npPV = npPayoffAdj * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
npPVBumpPlus = npPayoffAdjBumpPlus * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
npPVBumpMinus = npPayoffAdjBumpMinus * np.exp(
-self.__fltRiskFreeRate * self.__fltTimeToMaturity)
# Calculate the numerator and denominator
n1 = (npPVBumpPlus - (2 * npPV) + npPVBumpMinus)
d1 = (npBump * npBump)
# Calculate the delta
npAllGamma = n1 / d1
# Calculate the mean for each axis.
npGamma = np.mean(npAllGamma, axis=1)
# Calculate the stdev for each axis.
npGammaSTD = np.std(npAllGamma, axis=1)
# Return the option price.
return (npGamma, npGammaSTD)
def getOptionVega(self, npStock):
# Get the random numbers
Z = np.random.standard_normal((1, self.__intNoIter))
# Now get the multipliers to find the final stock price
a1 = Z * self.__fltVol * np.sqrt(self.__fltTimeToMaturity)
a2 = (self.__fltRiskFreeRate - 0.5 * (self.__fltVol ** 2)) \
* self.__fltTimeToMaturity
        Mult = np.exp(a1 + a2)
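        # By analogy with getOptionRho above (an assumption; the original body
        # is not shown), vega would bump the volatility, rebuild the
        # multipliers, and difference the discounted payoffs:
        #   fltBump = 0.0001
        #   volBumped = self.__fltVol + fltBump
        #   MultBump = np.exp(Z * volBumped * np.sqrt(self.__fltTimeToMaturity)
        #                     + (self.__fltRiskFreeRate - 0.5 * volBumped ** 2)
        #                     * self.__fltTimeToMaturity)
        #   ... payoffs/PVs as in getOptionRho, then
        #   npAllVega = (npPVBump - npPV) * (0.01 / fltBump)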
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # track elapsed time before testing the status, so it is always
            # defined when the completion message is printed (the original
            # assigned it after the check, raising a NameError on a fast finish)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
    return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
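# End-to-end usage sketch (assumed workflow; the dataset name, regex tag and
# dates are illustrative only):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, tag='.*METBK.*\\.nc$')
#   var_list = structtype()   # typically populated via M2M_URLs(...) below
#   variables, time_converted = M2M_Data(files, var_list)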
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
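# structtype grows on demand: indexing one element past the current end appends
# a fresh var() (see __getitem__), which is how M2M_URLs below can populate
# var_list entry by entry. For example:
#   s = structtype()
#   s[0].name = 'time'   # first access at index 0 creates the element
#   len(s)               # -> 1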
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
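# Bio-acoustic Sonar: only the time coordinate is mapped for these echosounder streams.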
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
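# Surface Wave Spectra: bulk wave statistics from the buoy wave sensor.
# wave_height_hmo is the spectral significant wave height, Hm0 = 4*sqrt(m0),
# where m0 is the zeroth moment of the wave energy spectrum.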
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
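# Single Point Velocity Meter: point current velocities plus instrument
# attitude, temperature, and pressure, reported in the instrument's native
# scaled integer units (deci-degrees, 0.01degC, 0.001dbar).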
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
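# pCO2 Water: thermistor temperature and the partial pressure of CO2 in seawater.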
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
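# Seawater pH: thermistor temperature and spectrophotometric pH (dimensionless, hence 'unitless').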
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
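# Spectral Irradiance: downwelling irradiance vector, one value per wavelength channel.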
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
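# Seafloor Pressure: absolute bottom pressure and temperature from the tide-measurement streams.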
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
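# Pumped CTD: temperature, practical salinity, derived density, pressure, and
# conductivity. A minimal sketch (an assumption, not part of this script) of
# how the entries mapped below are typically filled once a requested netCDF
# file is on disk; the netCDF4 usage and the filename are illustrative only:
#   import netCDF4
#   ds = netCDF4.Dataset('deployment0001_CE01ISSM-RID16-03-CTDBPC000_telemetered.nc')
#   for var in var_list:
#       var.data = np.append(var.data, ds.variables[var.name][:])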
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
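# 3-D Single Point Velocity Meter: turbulent velocity components and pressure
# at the seafloor multi-function nodes (MFN).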
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
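# VEL3D-K: the profiler-mounted variant on the CE09OSPM wire-following
# profiler (velocities, attitude, and co-located CTD pressure); the CTD
# branch that follows maps the same profiler's CTDPF stream.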
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
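# pCO2 Air-Sea: surface seawater and marine air pCO2 plus the derived air-sea CO2 flux.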
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
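# Photosynthetically Available Radiation on the wire-following profiler,
# paired with the profiler CTD pressure for depth registration.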
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
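# Absorption Spectrophotometer: only the time coordinate is mapped here; the
# multispectral absorption/attenuation channels are not pulled by this block.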
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
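# (nitrate concentration; raw and salinity-corrected values, both in umol/L)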
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
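# (3-axis motion package on the buoy; only the time coordinate is requested)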
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
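# (bulk meteorology package: surface T/S, winds, pressure, humidity, radiation, precipitation, and derived minute fluxes)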
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
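# (three-channel fluorometer: chlorophyll-a, CDOM, and optical backscatter)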
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
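# (direct-covariance air-sea flux package; only the time coordinate is requested)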
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
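# (dissolved oxygen optode; NSIF streams also carry the raw optode estimate and temperature)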
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
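# (acoustic Doppler current profiler: earth-referenced velocities per depth bin, plus attitude)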
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
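# (surface wave statistics: wave heights, periods, mean direction, and spread)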
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
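# (single-point velocity meter: velocity components plus attitude, temperature, and pressure)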
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# this recovered_host VELPT branch uses the non-DCL parser stream
# (velpt_ab_instrument_recovered); note the path references RID16 even though
# the branch matches node == 'BUOY'. The DCL variant is kept for reference:
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
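#Each branch below maps a (platform_name, node, instrument_class, method)
#combination to its UFrame stream path and pre-declares the variable names,
#empty placeholder data arrays, and engineering units for that stream.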
#PCO2W: partial pressure of CO2 in seawater (thermistor temperature, pCO2 in uatm)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN: seawater pH (thermistor temperature, pH reported unitless)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR: downwelling spectral irradiance (uW cm-2 nm-1)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF: seafloor pressure and temperature (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP: pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D: single-point turbulent velocity (east/north/up components plus pressure)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A: air-sea pCO2 (surface seawater and atmosphere) and CO2 flux
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA: optical absorption and attenuation (only the time coordinate is declared here)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR: nitrate concentration (raw and salinity-corrected, umol/L)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
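#CTDBP (RecoveredInst): instrument-recovered pumped CTD streams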
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
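#CTDPF (RecoveredWFP): wire-following profiler CTD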
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
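#ADCP (RecoveredInst): earth-referenced velocity profiles (bin depths, attitude, east/north/up velocity)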
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
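#ZPLSC (RecoveredInst): bio-acoustic sonar echogram data (only the time coordinate is declared here)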
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
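#VELPT (RecoveredInst): single-point velocity meter (velocity, attitude, temperature, pressure)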
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
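# VEL3D three-axis point velocimeters below (profiler-mounted vel3d_k and seafloor vel3d_c variants)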
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
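# PRESF seafloor tide/pressure recorders below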
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
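# PHSEN seawater pH sensors below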
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
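# PCO2W water-side pCO2 sensors below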
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
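# PARAD photosynthetically available radiation (PAR) sensor on the coastal profiler below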
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
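# NUTNR nitrate sensors (SUNA) below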
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
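# FDCHP direct covariance flux package below: only the time coordinate is mapped for this dataset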
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
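# FLORT three-channel fluorometer/optical backscatter sensors below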
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
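# DOSTA/DOFST dissolved oxygen sensors below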
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
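# Surface wave statistics below, served from the ADCPT-M log9 recovered streams on these moorings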
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
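# Cabled benthic experiment package (BEP) streamed datasets below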
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface Piercing Profiler) datasets below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
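# CSPP PARAD (photosynthetically available radiation) datasets.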
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
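# CSPP NUTNR (nitrate) datasets.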
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
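# CSPP SPKIR (downwelling spectral irradiance) datasets.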
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
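# CSPP VELPT (point velocity meter) datasets.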
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
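# CSPP OPTAA datasets; only time and interpolated CTD pressure are populated
# in these branches.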
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
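# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP); the branches defined
# here are all recovered CSPP streams.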
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
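# Coastal glider (CE05MOAS) CTD datasets, one telemetered/recovered-host pair
# per glider.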
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
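# Coastal glider (CE05MOAS) dissolved oxygen (DOSTA) datasets.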
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
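    # --- FLORT (ECO triplet fluorometer) datasets for the CE05MOAS gliders ---
    # Each branch carries nine variables: time, seawater scattering
    # coefficient, chlorophyll, CDOM, volume backscatter, optical
    # backscatter, CTD pressure, lat, and lon; the units repeat identically
    # in every block.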
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
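    # --- PARAD (photosynthetically available radiation) datasets ---
    # The PAR sensor branches use a five-variable layout: time, downwelling
    # PAR (umol photons m-2 s-1), CTD pressure, lat, and lon.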
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
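    # --- ADCP (acoustic Doppler current profiler) datasets ---
    # Glider ADCP data are available as recovered_host only. Each branch
    # defines eleven variables: time, bin depths, vehicle attitude
    # (heading/pitch/roll), the eastward/northward/upward seawater velocity
    # components, CTD pressure, lat, and lon.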
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
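# --------------------------------------------------------------------------
# METBK1-hr: hourly bulk-meteorology flux products from the surface-mooring
# buoys (CE02SHSM, CE07SHSM, CE04OSSM, CE09OSSM). Each telemetered /
# recovered_host pair maps the metbk_hourly stream onto the same 17 slots:
# met_timeflx plus the derived flux and near-surface state variables, with
# every .data field initialized to an empty np.array([]).
# --------------------------------------------------------------------------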
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
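# --------------------------------------------------------------------------
# WAVSS_MeanDir: mean directional wave spectra from the buoy-mounted WAVSS
# (05-WAVSSA000) on CE02SHSM, CE04OSSM, CE09OSSM, and CE07SHSM. Twelve slots
# cover the band layout (number_bands, initial_frequency, frequency_spacing),
# the directional PSD and spread arrays, and the magnetically corrected wave
# directions.
# --------------------------------------------------------------------------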
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
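# --------------------------------------------------------------------------
# WAVSS_NonDir: non-directional wave spectra from the same WAVSS instruments.
# Six slots: time, the band layout, psd_non_directional, and the derived
# frequency axis wavss_a_non_directional_frequency.
# --------------------------------------------------------------------------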
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
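# --------------------------------------------------------------------------
# WAVSS_Motion: buoy-motion time series from the WAVSS. Eleven slots cover
# the sample layout (number_time_samples, initial_time, time_spacing), the
# heave/north/east offset arrays in meters, and the magnetically corrected
# x/y buoy-motion products on their own time base (wavss_a_buoymotion_time).
# --------------------------------------------------------------------------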
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
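# --------------------------------------------------------------------------
# WAVSS_Fourier: directional Fourier coefficients from the WAVSS. Eight slots
# cover both the non-directional and directional band layouts plus the
# dimensionless fourier_coefficient_2d_array. As a minimal sketch of how one
# branch resolves (values taken from the first block below, assuming the
# ladder is driven by the same four selector variables used throughout):
#   platform_name, node = 'CE02SHSM', 'BUOY'
#   instrument_class, method = 'WAVSS_Fourier', 'Telemetered'
#   # -> uframe_dataset_name =
#   #    'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
# --------------------------------------------------------------------------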
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
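# Cabled-array profiler branches follow: CE04OSPS is the shallow profiler
# (method 'Streamed'), CE04OSPD the deep profiler ('RecoveredInst' and
# 'RecoveredWFP'). A hypothetical usage sketch, assuming this chain lives in
# a function such as get_variables(platform_name, node, instrument_class,
# method) that returns (uframe_dataset_name, var_list):
#     name, variables = get_variables('CE04OSPS', 'PROFILER', 'CTD', 'Streamed')
#     # name -> 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'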
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
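# Note: the CE04OSPS DOSTA 'Streamed' branch below reuses the co-located CTD
# stream (2A-CTDPFA107/streamed/ctdpf_sbe43_sample); the corrected oxygen
# values arrive inside the CTD record rather than from a separate DOSTA stream.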
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
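# VELPT attitude and ancillary fields are reported in scaled integer units
# (deci-degrees, '0.01degC', '0.001dbar'). A conversion sketch (hedged;
# takes the stated scale factors at face value and assumes populated arrays):
#     heading_deg = var_list[4].data / 10.0    # deci-degrees -> degrees
#     temp_degC   = var_list[7].data * 0.01    # '0.01degC' counts -> degC
#     pres_dbar   = var_list[8].data * 0.001   # '0.001dbar' counts -> dbar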
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' # also used for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
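# The METBK branches below expose magnetically corrected wind components
# ('met_windavg_mag_corr_east' / '..._north'). A derived-quantity sketch
# (hypothetical; assumes populated, time-aligned arrays):
#     import numpy as np
#     u = var_list[4].data                 # eastward wind component, m/s
#     v = var_list[5].data                 # northward wind component, m/s
#     wind_speed = np.sqrt(u**2 + v**2)    # m/s
#     wind_to = np.degrees(np.arctan2(u, v)) % 360.0  # direction blown toward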
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
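# WAVSS_Stats returns bulk wave parameters. 'significant_wave_height'
# (time-domain H1/3, var_list[5]) and 'wave_height_hmo' (spectral Hm0,
# var_list[12]) estimate the same quantity and should track each other
# closely; a quick consistency check (hypothetical; assumes populated data):
#     import numpy as np
#     ratio = var_list[5].data / var_list[12].data
#     print(np.nanmedian(ratio))   # expect a value near 1.0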
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
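# PCO2A branches report seawater and atmospheric pCO2 plus the derived
# air-sea CO2 flux ('pco2_co2flux', mol m-2 s-1). Under the usual sign
# convention the flux is positive (outgassing) when seawater pCO2 exceeds
# atmospheric pCO2; a hedged sanity check (assumes populated arrays):
#     import numpy as np
#     delta = var_list[1].data - var_list[2].data   # ssw minus atm, uatm
#     agree = np.sign(delta) == np.sign(var_list[3].data)
#     print(agree.mean())   # fraction of records with a consistent sign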
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (RecoveredHost)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
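# The FDCHP (direct-covariance flux) branches below map only the 'time'
# variable, so requests against them return timestamps only; other FDCHP
# products would need additional variables added to this table.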
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
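# metbk_hourly streams carry hourly bulk-flux products derived from the raw
# METBK record; note the time variable is 'met_timeflx' rather than 'time'.
# Converting the epoch (seconds since 1900-01-01) to datetimes, a minimal
# sketch (assumes populated data):
#     import datetime
#     epoch = datetime.datetime(1900, 1, 1)
#     times = [epoch + datetime.timedelta(seconds=float(t))
#              for t in var_list[0].data]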
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
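#CTD: note the naming split between delivery methods -- the telemetered and
#recovered_host (DCL) streams expose 'temp', 'pressure', and 'conductivity',
#while the recovered_inst streams use the 'ctdbp_seawater_*' parameter names.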
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
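#Seafloor (MFN) CTDs follow; same variable pattern as the NSIF branches above,
#but served from the MFD37 port (CTDBPE000 on CP04OSSM, CTDBPD000 elsewhere).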
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
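#OPTAA: only the time base is requested here; the spectral absorption and
#attenuation channels are not mapped into var_list in this chain.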
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
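#VELPT: single-point velocity meter. Velocities are in m/s, but attitude and
#ancillary fields are kept in raw instrument counts (deci-degrees, 0.01degC,
#0.001dbar), as the units below indicate.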
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
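#FLORT: three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter);
#the telemetered and recovered_host branches share the same flort_sample stream.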
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
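#SPKIR: downwelling spectral irradiance; the single data variable is the
#multichannel vector 'spkir_abj_cspp_downwelling_vector' in uW cm-2 nm-1.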
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
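#DOSTA: dissolved oxygen optode; note 'dissolved_oxygen' is reported per unit
#mass (umol/kg) while the estimated and temperature-corrected oxygen
#concentrations are reported per unit volume (umol/L).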
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
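#PHSEN: reagent-based seawater pH. A hedged usage sketch, again assuming the
#hypothetical wrapper name used above:
#    var_list = get_variables('CP01CNSM', 'NSIF', 'PHSEN', 'RecoveredInst')
#    var_list[2]  # phsen_abcdef_ph_seawater, pH (unitless)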
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
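#Coastal Pioneer Mooring MFN pH Sensors (PHSEN)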
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
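#Coastal Pioneer Mooring MFN pCO2 Sensors (PCO2W)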
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
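#Coastal Pioneer Mooring MFN Seafloor Pressure/Tide Sensors (PRESF)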
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
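#Coastal Pioneer Mooring MFN Point Velocity Meters (VELPT)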
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
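#Coastal Pioneer Mooring MFN Dissolved Oxygen Sensors (DOSTA)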
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
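#Coastal Pioneer Mooring MFN Bio-acoustic Sonars (ZPLSC)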
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
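#Coastal Pioneer Mooring MFN ADCPs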
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
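#CP01CNPM Wire-Following Profiler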
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
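#CP02PMCI Wire-Following Profiler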
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = | np.array([]) | numpy.array |
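# Illustrative sketch (not part of the dataset row above): every CTD/WFP
# branch assigns the same six name/unit pairs, a repetition that a lookup
# table keyed on (platform, node, instrument_class, method) could capture.
CTD_WFP_VARS = [
    ('time', 'seconds since 1900-01-01'),
    ('ctdpf_ckl_seawater_temperature', 'degC'),
    ('practical_salinity', 'unitless'),
    ('density', 'kg/m3'),
    ('ctdpf_ckl_seawater_pressure', 'dbar'),
    ('ctdpf_ckl_seawater_conductivity', 'S/m'),
]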
# Solver: backward-time central-space (BTCS) method (implicit).
# Boundary conditions are u(0,t) = 0 and u_x(1,t) = 0.
import numpy as np
def BTCS(dt,dx,t_max,x_max,k,u0):
s = k*dt/dx**2
x = np.arange(0,x_max+dx,dx)
t = | np.arange(0,t_max+dt,dt) | numpy.arange |
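# A minimal sketch completing the BTCS idea above (an assumption, not the
# dataset's own continuation): solve the tridiagonal system M u^{n+1} = u^n
# at every step, with a Dirichlet row at x = 0 and a ghost-point Neumann row
# at x = x_max.
def btcs_sketch(dt=0.001, dx=0.05, t_max=0.1, x_max=1.0, k=1.0):
    s = k * dt / dx**2
    x = np.arange(0, x_max + dx, dx)
    t = np.arange(0, t_max + dt, dt)
    n = len(x)
    u = np.sin(np.pi * x / 2.0)              # initial profile obeying both BCs
    M = (np.diag((1 + 2 * s) * np.ones(n))
         + np.diag(-s * np.ones(n - 1), 1)
         + np.diag(-s * np.ones(n - 1), -1))
    M[0, :] = 0.0
    M[0, 0] = 1.0                            # Dirichlet: u(0, t) = 0
    M[-1, -2] = -2.0 * s                     # Neumann: u_x(x_max, t) = 0
    for _ in t[1:]:
        rhs = u.copy()
        rhs[0] = 0.0
        u = np.linalg.solve(M, rhs)
    return x, u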
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File img_utils.py created on 22:39 2017/9/11
@author: <NAME>
@version: 1.0
"""
import numpy as np
import cv2
import os
def img_crop_to_array(image):
    ''' Divide an image into an 8x8 grid of 32x32 sprites,
    then stack them into a single numpy array.
Shape of returned array: (64, 32, 32, 3)
'''
sprite_size = 32
img = cv2.resize(image, (sprite_size * 8, sprite_size * 8))
np_image_data = np.asarray(img, dtype=np.float16)
np_image_data = cv2.normalize(np_image_data.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
np_data_4d = np.array([]).reshape((0, 32, 32, 3))
for x in range(8):
for y in range(8):
sprite = np_image_data[(x * sprite_size):((x + 1) * sprite_size),
(y * sprite_size):((y + 1) * sprite_size)]
np_data_4d = | np.concatenate((np_data_4d, sprite[np.newaxis, ...]), axis=0) | numpy.concatenate |
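# Hypothetical usage sketch (the image path is an assumption, and the loop
# above is assumed to finish with `return np_data_4d`):
def _img_crop_demo(path='example.jpg'):
    img = cv2.imread(path)                   # any image readable by OpenCV
    sprites = img_crop_to_array(img)
    return sprites.shape                     # expected: (64, 32, 32, 3)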
from imaginative_ddpg import GAN_DDPG
import numpy as np
from reward_model import Reward_Model as rew_mod
from collections import deque
from os import system
import time
def inverse_sigmoid(x):
    # note: this returns 1/(1 + e^x), i.e. sigmoid(-x), not the logit function
    return 1. / (1 + np.exp(x))
class GodelMachine(object):
def __init__(self):
self.origin_pacer = GAN_DDPG((300,), 2)
self.reward_model = rew_mod(300)
self.run()
def run(self):
state = deque(maxlen=300)
for i in range(300):
state.append(0)
nodems = False
if True:
mesa = 0
for i in range(1000000000):
action, action_raw = self.origin_pacer.get_action(np.expand_dims(np.float32( | np.array(state) | numpy.array |
import copy
import os
import os.path as osp
import pickle as pkl
import numpy as np
import torch
import torch.utils.data as data
# 1211: 1383
# Joints in H3.6M -- data has 32 joints,
# but only 17 that move; these are the indices.
H36M_NAMES = [''] * 32
H36M_NAMES[0] = 'Hip'
H36M_NAMES[1] = 'RHip'
H36M_NAMES[2] = 'RKnee'
H36M_NAMES[3] = 'RFoot'
H36M_NAMES[6] = 'LHip'
H36M_NAMES[7] = 'LKnee'
H36M_NAMES[8] = 'LFoot'
H36M_NAMES[12] = 'Spine'
H36M_NAMES[13] = 'Thorax'
H36M_NAMES[14] = 'Neck/Nose'
H36M_NAMES[15] = 'Head'
H36M_NAMES[17] = 'LShoulder'
H36M_NAMES[18] = 'LElbow'
H36M_NAMES[19] = 'LWrist'
H36M_NAMES[25] = 'RShoulder'
H36M_NAMES[26] = 'RElbow'
H36M_NAMES[27] = 'RWrist'
"""
dict_keys(['Directions', 'WalkingDog', 'Eating 2', 'Posing', 'Smoking 1', 'Phoning',
'TakingPhoto 1', 'Waiting', 'Walking 1', 'Purchases 1', 'Eating', 'Phoning 1', 'TakingPhoto',
'Directions 1', 'SittingDown 2', 'Discussion 1', 'Posing 1', 'WalkTogether 1', 'Purchases',
'SittingDown', 'Smoking', 'Greeting 1', 'Sitting 1', 'Sitting 2', 'WalkTogether', 'Waiting 1',
'WalkingDog 1', 'Greeting', 'Discussion', 'Walking'])
"""
def project_point_radial(P, R, T, f, c, k, p):
"""
Project points from 3d to 2d using camera parameters
including radial and tangential distortion
Args
P: Nx3 points in world coordinates
R: 3x3 Camera rotation matrix
T: 3x1 Camera translation parameters
f: (scalar) Camera focal length
c: 2x1 Camera center
k: 3x1 Camera radial distortion coefficients
p: 2x1 Camera tangential distortion coefficients
Returns
Proj: Nx2 points in pixel space
D: 1xN depth of each point in camera space
radial: 1xN radial distortion per point
tan: 1xN tangential distortion per point
r2: 1xN squared radius of the projected points before distortion
"""
# P is a matrix of 3-dimensional points
assert len(P.shape) == 2
assert P.shape[1] == 3
N = P.shape[0]
X = R.dot(P.T - T) # rotate and translate
XX = X[:2, :] / X[2, :]
r2 = XX[0, :] ** 2 + XX[1, :] ** 2
radial = 1 + np.einsum(
'ij,ij->j', np.tile(k, (1, N)), | np.array([r2, r2 ** 2, r2 ** 3]) | numpy.array |
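# Sketch of the remaining distortion steps the docstring describes (the
# standard radial + tangential model; these coefficients are invented):
def _distortion_sketch():
    XX = np.array([[0.10, -0.20], [0.05, 0.30]])   # 2 x N normalized coords
    k = np.array([0.01, 0.001, 0.0001])            # radial coefficients
    p = np.array([0.001, 0.002])                   # tangential coefficients
    r2 = XX[0] ** 2 + XX[1] ** 2
    radial = 1 + k[0] * r2 + k[1] * r2 ** 2 + k[2] * r2 ** 3
    tan = p[0] * XX[1] + p[1] * XX[0]
    return XX * (radial + 2 * tan) + np.outer(np.array([p[1], p[0]]), r2)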
#! python3
# coding: utf-8
# * =====================================================================================
# *
# * Filename: AMT_final.py
# *
# * Description: Python program for AMT2018 final thesis
# *
# * Version: 1.0
# * Created: 06/07/2018 07:09:10 PM
# * Revision: none
# * Interpreter: Python3.6
# *
# * Author: <NAME>, <EMAIL>
# *   Organization:  Tianjin University
# *
# * =====================================================================================
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.fftpack import fft
import math_tool
def polyfunc(r, theta):
    # description: Definition of the Zernike polynomial. Takes r and theta,
    #              the position in cylindrical coordinates (CC), and returns the
    #              Z value at that position, just as the literal function does.
# param: r, theta
# return: sum, which represents the value of Z
ci = np.array([0 ,0, 0, 0, 1.205834, 1.209232, -0.073527,
0.283878, -0.047157, 0.69305, 0.0821, -0.520752,
-0.054379, -0.092302, 0.02262, -0.009395], dtype=np.double)
zi = np.array([1, r*np.cos(theta), r*np.sin(theta), 2*r**2-1,
r**2*np.cos(2*theta), r**2*np.sin(2*theta),
(3*r**2-2)*r*np.cos(theta), (3*r**2-2)*r*np.sin(theta),
6*r**4-6*r**2+1, r**3*np.cos(3*theta),
r**3*np.sin(3*theta), (4*r**2-3)*r**2*np.cos(2*theta),
(4*r**2-3)*r**2*np.sin(2*theta),
(10*r**4-12*r**2+3)*r*np.cos(theta),
(10*r**4-12*r**2+3)*r*np.sin(theta),
20*r**6-30*r**4+12*r**2-1], dtype = np.double)
sum = 0
for i in np.arange(0, np.shape(ci)[0]):
sum = sum+ci[i]*zi[i]
return sum
def rot(r, theta):
    # description: Definition of the conic section. Similar to polyfunc.
# param: r, theta
# return: result, which represents the value of Z
c = 1/594.51107
k = 0
numerator = c*r**2
denominator = 1+(1-(1+k)*c**2*r**2)**0.5
result = numerator/denominator
return result
def zfunc(r, theta):
# description: Definition of the surface shape function. This function
# is a combination of polyfunc and rot. Meanwhile, normalization
# for r is processed.
# param: r, theta
# return: result, which represents the value of Z
result = polyfunc(r/200, theta)+rot(r, theta)
return result
def rtheta2xy(func, x, y):
    # description: Takes x and y (rectangular coordinates, RC) and evaluates a
    #              function defined in cylindrical coordinates (CC) at that
    #              point, returning its Z value.
    # param: x, y: coordinates in rectangular coordinates (RC).
# return: result, which represents the value of Z
r = (x**2+y**2)**0.5
if x > 0:
theta = np.arctan(y/x)
elif x < 0:
theta = np.arctan(y/x)+np.pi
else:
if y>=0:
theta = np.pi/2
else:
theta = -np.pi/2
result = func(r, theta)
return result
def get_rad_seq(rtfunc, r, N):
    # description: For a given function in cylindrical coordinates (CC), generate
    #              a sequence of Z-value samples on the circle of radius r.
# param: rtfunc: the target function which is based on CC
# r: radius of the circle
# N: number of sample points
# return: result, which is a generated sequence with length N
result = | np.zeros(N) | numpy.zeros |
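# A plausible continuation of get_rad_seq (an assumption, not the dataset's
# own completion): sample rtfunc at N equally spaced angles around the circle.
def get_rad_seq_sketch(rtfunc, r, N):
    thetas = np.linspace(0, 2 * np.pi, N, endpoint=False)
    return np.array([rtfunc(r, th) for th in thetas])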
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
import pytest
import librosa
import soundpy as sp
test_dir = 'test_audio/'
test_audiofile = '{}audio2channels.wav'.format(test_dir)
test_traffic = '{}traffic.wav'.format(test_dir)
test_python = '{}python.wav'.format(test_dir)
test_horn = '{}car_horn.wav'.format(test_dir)
samples_48000, sr_48000 = librosa.load(test_audiofile, sr=48000)
samples_44100, sr_44100 = librosa.load(test_audiofile, sr=44100)
samples_22050, sr_22050 = librosa.load(test_audiofile, sr=22050)
samples_16000, sr_16000 = librosa.load(test_audiofile, sr=16000)
samples_8000, sr_8000 = librosa.load(test_audiofile, sr=8000)
def test_shape_samps_channels_mono():
input_data = np.array([1,2,3,4,5])
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_correct():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(5,2)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_incorrect():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(2,5)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data.T, output_data)
def test_calc_phase():
np.random.seed(seed=0)
rand_fft = np.random.random(2) + np.random.random(2) * 1j
phase = sp.dsp.calc_phase(rand_fft)
value1 = np.array([0.67324134+0.73942281j, 0.79544405+0.60602703j])
assert np.allclose(value1, phase)
def test_calc_phase_framelength10_default():
frame_length = 10
time = np.arange(0, 10, 0.1)
signal = np.sin(time)[:frame_length]
fft_vals = np.fft.fft(signal)
phase = sp.dsp.calc_phase(fft_vals)
value1 = np.array([ 1. +0.j, -0.37872566+0.92550898j])
assert np.allclose(value1, phase[:2])
def test_calc_phase_framelength10_radiansTrue():
frame_length = 10
time = np.arange(0, 10, 0.1)
signal = np.sin(time)[:frame_length]
fft_vals = np.fft.fft(signal)
phase = sp.dsp.calc_phase(fft_vals, radians = True)
value1 = np.array([ 0., 1.95921533])
assert np.allclose(value1, phase[:2])
def test_reconstruct_whole_spectrum():
x = np.array([3.,2.,1.,0.,0.,0.,0.])
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x)
expected = np.array([3., 2., 1., 0., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == len(x)
def test_reconstruct_whole_spectrum_input4_nfft7():
x = np.array([3.,2.,1.,0.])
n_fft = 7
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
expected = np.array([3., 2., 1., 0., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft6():
x = np.array([3.,2.,1.,0.])
n_fft= 6
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = np.array([3., 2., 1., 0., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft5():
x = np.array([3.,2.,1.,0.])
n_fft = 5
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = np.array([3., 2., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft14():
x = np.array([3.,2.,1.,0.])
n_fft = 14
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = | np.array([3., 2., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 3.]) | numpy.array |
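# The mirroring behaviour the four tests above encode can be sketched directly
# (an assumption about sp.dsp.reconstruct_whole_spectrum, inferred only from
# the expected arrays): keep up to n_fft//2 + 1 leading values, then reflect.
def mirror_spectrum_sketch(half, n_fft):
    out = np.zeros(n_fft)
    keep = min(len(half), n_fft // 2 + 1)
    out[:keep] = half[:keep]
    for k in range(keep, n_fft):
        out[k] = out[n_fft - 1 - k]
    return out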
# -*- coding: utf-8
"""Module for testing reading charactersitic lines.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tests/tools_tests/characteristics_tests.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
from tespy.tools.characteristics import (char_line, compressor_map,
load_default_char, load_custom_char)
from tespy.tools.helpers import extend_basic_path
from pkg_resources import resource_filename
import os
import json
import numpy as np
import shutil
class characteristics_tests:
def setup(self):
# create data path and write json files into path
self.path = extend_basic_path('data')
def test_custom_char_line_import(self):
# we need to write some data to the path first, using defaults
data_path = resource_filename('tespy.data', 'char_lines.json')
with open(data_path) as f:
raw_data = json.loads(f.read())
data = raw_data['heat exchanger']['kA_char2']
with open(os.path.join(self.path, 'char_lines.json'), 'w') as outfile:
json.dump(data, outfile)
char_original = load_default_char('heat exchanger', 'kA_char2',
'EVAPORATING FLUID', char_line)
char_custom = load_custom_char('EVAPORATING FLUID', char_line)
shutil.rmtree(self.path, ignore_errors=True)
x_cond = | np.array_equal(char_original.x, char_custom.x) | numpy.array_equal |
#!/usr/bin/env python3
# Copyright 2021 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import os
from networks.tpdi_networks import DFCNetwork
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.colors import ListedColormap
from matplotlib.ticker import FormatStrFormatter
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
out_dir = './logs/toy_experiments/fig2'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
np.random.seed(42)
torch.manual_seed(42)
n_in=5
n_hidden=[2]
n_out=2
nb_Q = 2000
nb_J_damped = 100
fit_on = 'total' # 'J', 'total', 'Q'
def rescale(matrix, scale=1.):
matrix_magnitude = np.linalg.norm(matrix)
return scale/matrix_magnitude * matrix
def all_positive_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real<0) == 0
def all_negative_eig(A):
lamb = np.linalg.eigvals(A)
return sum(lamb.real>0) == 0
def generate_random_Q(jac):
while True:
permutation = np.random.randn(n_out,n_out)
Q_rand = np.matmul(jac.T, permutation)
if all_positive_eig( | np.matmul(jac, Q_rand) | numpy.matmul |
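# Hypothetical usage (assumes the truncated loop above ends by returning
# Q_rand once the eigenvalue condition holds):
def _random_q_demo():
    jac = np.random.randn(n_out, n_hidden[0])      # 2x2 toy "Jacobian"
    Q = generate_random_Q(jac)
    return all_positive_eig(np.matmul(jac, Q))     # True by construction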
import torchaudio
import torch
import torch.utils.data as data
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import csv
import math
def getDFTFeature(filepath,win_size=1024,win_shift=512,preemphasis=False,channel_first=True,drop_dc=True,cut_len=5160,normalize=False):
    '''
    Compute the log DFT spectrum of an audio file.
    Args:
        filepath: path to the audio file
        win_size: window size (in samples)
        win_shift: hop size (in samples)
        preemphasis: whether to pre-emphasize via a first-order difference,
            attenuating low frequencies and boosting high frequencies
        channel_first: whether to put channels in first dimension
        drop_dc: whether to drop DC component in spectrum (frequency==0)
        cut_len: keep a fixed number of points along the time axis
        normalize: raw energies are tiny (log energies on the order of -100),
            so normalization is worthwhile
    Return:
        (log_power_spectrum, phase_spectrum): a tensor stacking the log-power
        and phase spectra, of size (2C, T, M//2), where C is the number of
        channels, T the number of frames, and M the number of FFT points;
        with channel_first=False it is transposed to (T, M//2, 2C)
    '''
waveform, sample_freq = torchaudio.load(filepath)
m, n = waveform.shape
# padding to 2^k
if (n-win_size)%win_shift != 0:
waveform = torch.cat([waveform,torch.zeros(m,win_shift-(n-win_size)%win_shift)],dim=1)
n = waveform.shape[1]
# split frames into rows
frame_num = (n-win_size)//win_shift + 1
strided_input = waveform.as_strided((m,frame_num,win_size),(n,win_shift,1))
strided_input = strided_input - torch.mean(strided_input,dim=2).unsqueeze(2)
    # pre-emphasis (first-order difference), applied only when requested;
    # note the coefficient must not shadow the `preemphasis` flag
    if preemphasis:
        preemphasis_coeff = 0.97
        offset_strided_input = torch.nn.functional.pad(
            strided_input, (1, 0), mode='replicate')
        strided_input = strided_input - preemphasis_coeff*offset_strided_input[:,:,:-1]
# windowed and FFT
win_func = torch.hamming_window(win_size,periodic=False)
windowed_input = strided_input * win_func
fft = torch.rfft(windowed_input,1,normalized=False, onesided=True)*2/win_size
if drop_dc:
fft = fft[:,:,1:]
fft = fft[:,:cut_len,:]
power_spectrum = fft.pow(2).sum(3)
log_power_spectrum = torch.log10(power_spectrum)*10
    # normalize the log-power spectrum
mean_vec = log_power_spectrum.mean(axis=1,keepdim=True)
std_vec = log_power_spectrum.std(axis=1,keepdim=True)
log_power_spectrum = (log_power_spectrum-mean_vec)/std_vec
#
phase_spectrum = fft[:,:,:,0]/fft.pow(2).sum(3).sqrt()
phase_spectrum = torch.acos(phase_spectrum)
phase_spectrum[fft[:,:,:,0]<0] = -phase_spectrum[fft[:,:,:,0]<0]
spectrums = torch.cat([log_power_spectrum,phase_spectrum],dim=0)
if not channel_first:
spectrums = spectrums.permute(1,2,0)
return spectrums
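# Hypothetical usage sketch (the file path is an assumption): with the default
# channel_first=True the returned tensor is (2C, T, F), log-power spectra
# stacked above phase spectra, with the DC bin dropped so F == win_size // 2.
def _dft_feature_demo(path='example.wav'):
    feat = getDFTFeature(path, win_size=1024, win_shift=512)
    return feat.shape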
class TUTDataset(data.Dataset):
'''
TUT audio dataset
'''
def __init__(self, data_folder,label_folder,sample_freq=44100,split_set=["split1"],ir_set=["ir0"],ov_set=["ov1"],cut_len=5160):
'''
Args:
data_folder: the path where audio files stored in
label_folder: the path where label files stored in
sample_freq: the sample frequency of audios
split_set: a list, containing the split sets to be loaded
ir_set: a list, containing the ir sets to be loaded
ov_set: a list, containing the ov sets to be loaded
cut_len: the length of feature in time axis
'''
super(TUTDataset).__init__()
self.data_folder = data_folder
self.label_folder = label_folder
self.file_names = list(os.listdir(data_folder))
self.sample_freq = sample_freq
self.cut_len = cut_len
# choose target set
self.split_set = set(split_set)
self.ir_set = set(ir_set)
self.ov_set = set(ov_set)
self.file_names = [name for name in self.file_names
if (set(name.split("_"))&self.split_set
and set(name.split("_"))&self.ir_set
and set(name.split("_"))&self.ov_set)]
self.name2idx, self.idx2name = self.getAllEvents()
self.name2idx['silence'] = 0
self.idx2name.insert(0,'silence')
self.num_class = len(self.idx2name)
self.frame_len = 1024
self.frame_shift = 512
def __getitem__(self,index):
'''get a data sample
Args:
index: the index
Return:
(sample_data,sample_sed_label,sample_doa_label):
                sample_data: 2C x N x F matrix, where C is the number of channels, N the number of time points, and F the number of frequency bins.
                sample_sed_label: N-dim vector, the class of the sound event
                sample_doa_label: N x 3K matrix, the position of the sound event (on the unit sphere); the final dimension is laid out as (x0,y0,z0,x1,y1,z1,...)
'''
file_name = self.file_names[index]
label_name = file_name.replace('.wav','.csv')
feature = getDFTFeature(os.path.join(self.data_folder,file_name))
label = self.getLabel(os.path.join(self.label_folder,label_name))
if(feature.shape[1]<5160):
feature = torch.cat((feature,torch.zeros(feature.shape[0],5160-feature.shape[1],feature.shape[2])),dim=1)
return feature,torch.LongTensor(label[0]),torch.tensor(label[1])
def getAllEvents(self):
'''get all event labels
Return:
(label2idx,idx2label)
            label2idx: a dict to convert event name to index. Events count from 1.
            idx2label: a list to convert index to event name. Index 0 corresponds to silence.
'''
event_set = set([])
for filename in os.listdir(self.label_folder):
if '.csv' in filename:
with open(os.path.join(self.label_folder,filename),'r') as f:
reader = csv.DictReader(f)
for row in reader:
event_set.add(row['sound_event_recording'])
return {v:(k+1) for k,v in enumerate(event_set)},list(event_set)
def decode_one(self,sed_tensor,doa_tensor):
        '''Decode event information from the predicted output.
        Args:
            sed_tensor: a tensor of predicted SED, size T x (num_class+1)
            doa_tensor: a tensor of predicted DOA, size T x (num_class+1)*3
        Return:
            all_events: a list of all events; every element is a dict storing the name, start, end, ele, azi, etc. of an event.
'''
sed_tensor = sed_tensor.cpu().numpy()
doa_tensor = doa_tensor.cpu().numpy()
all_events = []
current_idx = set([])
onset = {}
N = sed_tensor.shape[0]
K = sed_tensor.shape[1]
for i in range(N):
# print(sed_tensor[i,:])
idx = set(np.where(sed_tensor[i,:]>0.5)[0])
# open new events
for v in idx:
if v not in onset:
onset[v] = i
# close old events
tmp = onset.copy()
for v in tmp.keys():
if v not in idx:
startp = onset.pop(v)
endp = i
start = ((startp-1)*self.frame_shift+self.frame_len)/self.sample_freq
end = ((endp-1)*self.frame_shift+self.frame_len)/self.sample_freq
x = np.mean(doa_tensor[startp:endp,v*3])
y = np.mean(doa_tensor[startp:endp,v*3+1])
z = np.mean(doa_tensor[startp:endp,v*3+2])
r = math.sqrt(x*x+y*y+z*z)
if r<1e-5:
ele=0
azi=0
else:
ele = math.asin(z/r)
azi = math.acos(max(min(x/(r*math.cos(ele)),1),-1))
if y<0:
azi = -azi
all_events.append({
"idx":v,
"event":self.idx2name[v],
"start":start,
"end":end,
"x":x,
"y":y,
"z":z,
"ele":ele*180/math.pi,
"azi":azi*180/math.pi
})
# events continuing to audio end
i = N-1
tmp = onset.copy()
print(onset)
for v in tmp.keys():
startp = onset.pop(v)
endp = i
start = ((startp-1)*self.frame_shift+self.frame_len)/self.sample_freq
end = ((endp-1)*self.frame_shift+self.frame_len)/self.sample_freq
x = np.mean(doa_tensor[startp:endp,v*3])
y = | np.mean(doa_tensor[startp:endp,v*3+1]) | numpy.mean |
"""Tests for the process module"""
# Copyright Contributors to the Climbing Ratings project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from .. import process
from ..normal_distribution import NormalDistribution
from ..process_helpers import TriDiagonalLU
from .assertions import assert_close_get
class TestProcessFunctions(unittest.TestCase):
"""Tests for functions in the climber module"""
def setUp(self) -> None:
np.seterr(all="raise")
self.assert_close = assert_close_get(self, self.__class__)
def test_invert_lu_dot_g(self) -> None:
"""Test that for X = invert_lu_dot_g(LU, G), LU X = G"""
g = np.array([10.0, 5.0, 32.0])
d = np.array([1.0, 3.0, 10.0])
b = np.array([-2.0, 1.0])
a = np.array([0.1, -2.0])
lu = TriDiagonalLU(d, b, a)
x = process._invert_lu_dot_g(lu, g)
u_matrix = np.diagflat(d) + np.diagflat(b, 1)
l_matrix = np.eye(3) + np.diagflat(a, -1)
lu_matrix = np.dot(l_matrix, u_matrix)
# Test that LU X = G.
lux = np.dot(lu_matrix, x)
self.assert_close(g, lux, "LU X")
def test_invert_lu(self) -> None:
"""Test invert_lu(LU, U'L') M = -I"""
m = np.array([1.0, -2.0, 0.0, 0.5, 2.0, 1.0, 0.0, 3.0, 11.0]).reshape((3, 3))
lu = TriDiagonalLU(
np.array([1.0, 3.0, 10.0]), | np.array([-2, 1.0]) | numpy.array |
import enum
from matplotlib.collections import LineCollection, PolyCollection
from scipy.spatial import Voronoi, Delaunay, voronoi_plot_2d, delaunay_plot_2d
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
from collections import defaultdict
import sys
eps = sys.float_info.epsilon
def voronoi_polygons_2D(vor, return_ridges=False):
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
radius = vor.points.ptp().max()
#radius = max(self.width, self.height)
# Construct a map containing all ridges for a given point
all_ridges = defaultdict(list)
    # construct a map of all regions and all ridge points:
    # for each pair of points that forms a ridge, record the ridge points and vertices
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges[p1].append((p2, v1, v2))
all_ridges[p2].append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = | np.array([-t[1], t[0]]) | numpy.array |
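# Hypothetical usage (assumes the truncated function ultimately returns
# (new_regions, np.asarray(new_vertices)), as in the common recipe it follows):
def _voronoi_demo(n_points=15, seed=0):
    rng = np.random.default_rng(seed)
    vor = Voronoi(rng.random((n_points, 2)))
    regions, vertices = voronoi_polygons_2D(vor)
    return [vertices[region] for region in regions]   # one finite polygon per point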
import numpy as np
import matplotlib.pyplot as plt
import sympy
from sympy import *
import sys
sys.path.append(r'C:\Users\elira\Google Drive\butools2\Python')
sys.path.append('/home/d/dkrass/eliransc/Python')
from tqdm import tqdm
from butools.ph import *
from butools.map import *
from butools.queues import *
from butools.mam import *
from butools.dph import *
from scipy.linalg import expm, sinm, cosm
from sympy import *
from sympy import Symbol
from sympy.physics.quantum import TensorProduct
import pickle as pkl
import pandas as pd
from sympy import diff, sin, exp
from numpy.linalg import matrix_power
def busy(s, lam2, mu2):
return ((lam2 + mu2 + s) - ((lam2 + mu2 + s) ** 2 - 4 * lam2 * mu2) ** 0.5) / (2 * lam2)
def ser_lap(s, mu):
return mu / (s + mu)
def hyper(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * lam1 / (lam1 + lam2) + ser_lap(s, mu2) * lam2 / (lam1 + lam2)
def rho(lam1, lam2, mu1, mu2):
return (lam1 + lam2) * ((lam1 / ((lam1 + lam2) * mu1)) + (lam2 / ((lam1 + lam2) * mu2)))
def w_lap(s, lam1, lam2, mu1, mu2):
return ((1 - rho(lam1, lam2, mu1, mu2)) * s) / (s - (lam1 + lam2) * (1 - hyper(s, lam1, lam2, mu1, mu2)))
def F(s, lam1, lam2, mu1, mu2):
return w_lap(s, lam1, lam2, mu1, mu2) * ser_lap(s, mu1)
def A(s, lam1, lam2, mu2):
return (lam1 / (lam1 + lam2 - lam2 * (ser_lap(s, mu2))))
def beta(s, lam1, lam2, mu1, mu2):
return (lam1 / (lam1 + lam2 + s) + ((A(s, lam1, lam2, mu2) * lam2) / (lam1 + lam2 + s)) * (
ser_lap(s, mu2) - busy(s + lam1, lam2, mu2))) / (
1 - ((lam2 * busy(s + lam1, lam2, mu2)) / (lam1 + lam2 + s)))
def tau(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * (A(s, lam1, lam2, mu2) * (
1 - F(lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2)) + F(
lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2) * beta(s, lam1, lam2, mu1, mu2))
def get_var(lam1, lam2, mu1, mu2):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
dx = diff(y, s)
dxdx = diff(dx, s)
return dxdx.subs(s, 0) - (dx.subs(s, 0)) ** 2
def get_nth_moment(lam1, lam2, mu1, mu2, n):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
for i in range(n):
if i == 0:
dx = diff(y, s)
else:
dx = diff(dx, s)
return dx.subs(s, 0)
def get_first_n_moments(parameters, n=5):
lam1, lam2, mu1, mu2 = parameters
moments = []
    for order in range(1, n + 1):
        moments.append(get_nth_moment(lam1, lam2, mu1, mu2, order) * (-1) ** order)
moments = np.array([moments], dtype='float')
return moments
def kroneker_sum(G, H):
size_g = G.shape[0]
size_h = H.shape[0]
return np.kron(G, np.identity(size_h)) + np.kron(np.identity(size_g), H)
def give_boundry_probs(R, A0, A1, A, B, C0, ro):
p00, p01, p02, p100, p110, p120, p101, p111, p121 = symbols('p00 p01 p02 p100 p110 p120 p101 p111 p121')
eqns = [np.dot(np.array([p00, p01, p02]), np.ones((A0.shape[0]))) - (1 - ro)]
eq3 = np.dot(np.array([p00, p01, p02]), A0) + np.dot(np.array([p100, p110, p120, p101, p111, p121]), A1)
eq1 = np.dot(np.array([p00, p01, p02]), C0)
eq2 = np.dot(np.array([p100, p110, p120, p101, p111, p121]), B + np.dot(R, A))
for eq_ind in range(B.shape[0]):
eqns.append(eq1[0, eq_ind] + eq2[0, eq_ind])
for eq_ind in range(A0.shape[0]):
eqns.append(eq3[0, eq_ind])
A_mat, b = linear_eq_to_matrix(eqns[:-1], [p00, p01, p02, p100, p110, p120, p101, p111, p121])
return A_mat, b
def get_expect_gph_system(R, p1_arr, xm_max=5000):
expected = 0
for pi_val in range(1, xm_max):
ui = p1_arr.reshape((1, R.shape[0]))
Ri = np.linalg.matrix_power(R, pi_val - 1)
expected += np.dot( | np.dot(ui, Ri) | numpy.dot |
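# The moment-from-transform pattern used above, in isolation: the n-th moment
# equals (-1)**n times the n-th derivative of the Laplace-Stieltjes transform
# at s = 0. A self-contained sketch with an Exp(mu) service time:
def _lst_moment_demo(mu=2.0):
    s = Symbol('s')
    lst = mu / (s + mu)
    m1 = -diff(lst, s).subs(s, 0)                  # 1/mu   -> 0.5
    m2 = diff(lst, s, 2).subs(s, 0)                # 2/mu**2 -> 0.5
    return m1, m2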
import numbers
import numpy as np
import scipy.sparse as ss
import warnings
from .base import _BaseSparray
from .compat import (
broadcast_to, broadcast_shapes, ufuncs_with_fixed_point_at_zero,
intersect1d_sorted, union1d_sorted, combine_ranges, len_range
)
# masks for kinds of multidimensional indexing
EMPTY_SLICE_INDEX_MASK = 0b1
SLICE_INDEX_MASK = 0b10
INTEGER_INDEX_MASK = 0b100
ARRAY_INDEX_MASK = 0b1000
class FlatSparray(_BaseSparray):
'''Simple sparse ndarray-like, similar to scipy.sparse matrices.
Defined by three member variables:
self.data : array of nonzero values (may include zeros)
self.indices : sorted int64 array of nonzero flat indices
self.shape : tuple of integers, ala ndarray shape
'''
def __init__(self, indices, data, shape=None, is_canonical=False):
indices = np.array(indices, dtype=int, copy=False).ravel()
data = np.array(data, copy=False).ravel()
assert len(indices) == len(data), '# inds (%d) != # data (%d)' % (
len(indices), len(data))
if not is_canonical:
# sort and sum duplicates, but allow explicit zeros
indices, inv_ind = np.unique(indices, return_inverse=True)
data = np.bincount(inv_ind, weights=data).astype(data.dtype, copy=False)
if shape is None:
self.shape = (indices[-1] + 1,)
else:
self.shape = shape
assert np.prod(shape) >= len(data)
self.indices = indices
self.data = data
@property
def dtype(self):
return self.data.dtype
@staticmethod
def from_ndarray(arr):
'''Converts an array-like to a FlatSparray object.'''
arr = np.array(arr, copy=False)
mask = arr.flat != 0
idx, = np.nonzero(mask)
return FlatSparray(idx, arr.flat[mask], shape=arr.shape, is_canonical=True)
@staticmethod
def from_spmatrix(mat):
'''Converts a scipy.sparse matrix to a FlatSparray object'''
# attempt to canonicalize using scipy.sparse's code
try:
mat.sum_duplicates()
except AttributeError:
pass
mat = mat.tocoo()
inds = np.ravel_multi_index((mat.row, mat.col), mat.shape)
if (np.diff(inds) > 0).all():
# easy case: indices are pre-sorted
return FlatSparray(inds, mat.data, shape=mat.shape, is_canonical=True)
# do the sorting ourselves
order = np.argsort(inds)
return FlatSparray(inds[order], mat.data[order], shape=mat.shape,
is_canonical=True)
def toarray(self):
a = np.zeros(self.shape, dtype=self.data.dtype)
a.flat[self.indices] = self.data
return a
def tocoo(self):
assert len(self.shape) == 2
row, col = np.unravel_index(self.indices, self.shape)
return ss.coo_matrix((self.data, (row, col)), shape=self.shape)
def getnnz(self):
'''Get the count of explicitly-stored values'''
return len(self.indices)
nnz = property(fget=getnnz, doc=getnnz.__doc__)
def nonzero(self):
'''Returns a tuple of arrays containing indices of non-zero elements.
Note: Does not include explicitly-stored zeros.
'''
nz_inds = self.indices[self.data!=0]
return np.unravel_index(nz_inds, self.shape)
def transpose(self, *axes):
if self.ndim < 2:
return self
# axes control dimension order, defaults to reverse
if not axes:
axes = range(self.ndim - 1, -1, -1)
elif len(axes) == 1 and self.ndim > 1:
axes = axes[0]
new_shape = tuple(self.shape[i] for i in axes)
if self.shape == new_shape:
return self
# Hack: convert our flat indices into the new shape's flat indices.
old_multi_index = np.unravel_index(self.indices, self.shape)
new_multi_index = tuple(old_multi_index[i] for i in axes)
new_inds = np.ravel_multi_index(new_multi_index, new_shape)
return FlatSparray(new_inds, self.data, new_shape)
def diagonal(self, offset=0, axis1=0, axis2=1):
if axis1 == axis2:
raise ValueError('axis1 and axis2 cannot be the same')
if self.ndim < 2:
raise ValueError('diagonal requires at least two dimensions')
# TODO: support different axes, ndim > 2, etc
if self.ndim > 2:
raise NotImplementedError('diagonal() is NYI for ndim > 2')
if axis1 != 0 or axis2 != 1:
raise NotImplementedError('diagonal() is NYI for non-default axes')
if offset >= 0:
n = min(self.shape[0], self.shape[1] - offset)
ranges = np.array([[0, n, 1], [offset, n + offset, 1]],
dtype=self.indices.dtype)
else:
n = min(self.shape[0] + offset, self.shape[1])
ranges = np.array([[-offset, n - offset, 1], [0, n, 1]],
dtype=self.indices.dtype)
if n < 0:
return FlatSparray([], [], shape=(0,), is_canonical=True)
flat_idx = combine_ranges(ranges, self.shape, n, inner=True)
return self._getitem_flatidx(flat_idx, (n,))
def setdiag(self, values, offset=0):
if self.ndim < 2:
raise ValueError('setdiag() requires at least two dimensions')
# TODO: support different axes, ndim > 2, etc
if self.ndim > 2:
raise NotImplementedError('setdiag() is NYI for ndim > 2')
# XXX: copypasta from diagonal()
if offset >= 0:
n = min(self.shape[0], self.shape[1] - offset)
ranges = np.array([[0, n, 1], [offset, n + offset, 1]],
dtype=self.indices.dtype)
else:
n = min(self.shape[0] + offset, self.shape[1])
ranges = np.array([[-offset, n - offset, 1], [0, n, 1]],
dtype=self.indices.dtype)
if n <= 0:
return self
diag_indices = combine_ranges(ranges, self.shape, n, inner=True)
self._setitem_flatidx(diag_indices, values)
def __repr__(self):
return '<%s-FlatSparray of type %s\n\twith %d stored elements>' % (
self.shape, self.data.dtype, self.getnnz())
def __str__(self):
lines = []
multi_inds = np.unravel_index(self.indices, self.shape)
for x in zip(self.data, *multi_inds):
lines.append(' %s\t%s' % (x[1:], x[0]))
return '\n'.join(lines)
def reshape(self, new_shape):
try:
idx = new_shape.index(-1)
except ValueError:
assert np.prod(new_shape) >= len(self.data)
else:
assert sum(d == -1 for d in new_shape) == 1, 'Only one -1 allowed'
new_shape = list(new_shape)
new_shape[idx] = | np.prod(self.shape) | numpy.prod |
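# Round-trip sketch of the flat-index storage scheme defined above:
def _flat_sparray_demo():
    dense = np.zeros((3, 4))
    dense[0, 1], dense[2, 3] = 5.0, -2.0
    sp_arr = FlatSparray([1, 11], [5.0, -2.0], shape=(3, 4))  # flat indices
    return np.array_equal(sp_arr.toarray(), dense)            # -> True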
# Copyright (c) 2015-2019 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import copy
import os
import numpy as np
import scipy.signal
import astropy.io.fits as pf
from .. import qarray as qa
from ..timing import Timer, function_timer
from ..todmap import MapSampler
from ..tod import flagged_running_average, Interval
from .psd_math import autocov_psd, crosscov_psd
class OpNoiseEstim:
"""Noise estimation operator.
Args:
signal(str): Cache object name to analyze
flags(str): Cached flags to apply
detmask(byte): Flag bits to consider
commonmask(byte): Flag bits to consider
out(str): Output directory to write the PSDs to.
maskfile(str): FITS file name to read the mask from.
mapfile(str): FITS map to sample and subtract from the signal.
pol(bool): Sample also the polarized part of the map.
nbin_psd(int): Bin the resulting PSD.
lagmax(int): Maximum lag to consider for the covariance function.
stationary_period(float): Break the observation into several
estimation periods of this length [s].
nosingle(bool): Do not evaluate individual PSDs. Overridden by pairs.
nocross(bool): Do not evaluate cross-PSDs. Overridden by pairs.
calibrate_signal_estimate(bool): Regress, not just subtract the
signal estimate.
nsum(int): Downsampling factor for decimated data.
naverage(int): Smoothing kernel width for downsampled data.
apply_intervals(bool): If true, only measure the covariance
within each interval
pairs(iterable): Detector pairs to estimate noise for. Overrides
nosingle and nocross.
save_cov(bool): Save also the sample covariance.
"""
def __init__(
self,
signal=None,
flags=None,
detmask=1,
commonmask=1,
out=None,
maskfile=None,
mapfile=None,
pol=True,
nbin_psd=1000,
lagmax=10000,
stationary_period=86400.0,
nosingle=False,
nocross=True,
calibrate_signal_estimate=False,
nsum=10,
naverage=100,
apply_intervals=False,
pairs=None,
save_cov=False,
):
self._signal = signal
self._flags = flags
self._detmask = detmask
self._commonmask = commonmask
self._out = out
self._maskfile = maskfile
self._mapfile = mapfile
self._pol = pol
self._nbin_psd = nbin_psd
self._lagmax = lagmax
self._stationary_period = stationary_period
self._nosingle = nosingle
self._nocross = nocross
self._calibrate_signal_estimate = calibrate_signal_estimate
self._apply_intervals = apply_intervals
self._pairs = pairs
# Parameters for downsampling the data
self._nsum = nsum
self._naverage = naverage
self._save_cov = save_cov
@function_timer
def exec(self, data):
comm = data.comm.comm_group
masksampler = None
if self._maskfile:
masksampler = MapSampler(self._maskfile, comm=comm)
mapsampler = None
if self._mapfile:
mapsampler = MapSampler(self._mapfile, comm=comm, pol=True)
for obs in data.obs:
tod = obs["tod"]
if len(tod.local_dets) < len(tod.detectors):
raise RuntimeError(
"Noise estimation does not work on " "detector-split data"
)
dets = {}
for idet, det in enumerate(tod.detectors):
dets[det] = idet
ndet = len(dets)
if self._pairs is not None:
pairs = self._pairs
else:
# Construct a list of detector pairs
pairs = []
for idet1 in range(ndet):
det1 = tod.detectors[idet1]
for idet2 in range(idet1, ndet):
det2 = tod.detectors[idet2]
if det1 == det2 and self._nosingle:
continue
if det1 != det2 and self._nocross:
continue
pairs.append([det1, det2])
timestamps = tod.local_times()
commonflags = tod.local_common_flags()
commonflags = commonflags & self._commonmask != 0
fsample = 1 / np.median(np.diff(timestamps))
if "name" in obs:
fileroot = "noise_{}".format(obs["name"])
elif "id" in obs:
fileroot = "noise_{}".format(obs["id"])
else:
fileroot = "noise_{}".format(int(timestamps[0]))
if self._apply_intervals:
intervals = tod.local_intervals(obs["intervals"])
else:
intervals = [
Interval(
start=timestamps[0],
stop=timestamps[-1],
first=0,
last=timestamps.size - 1,
)
]
self.subtract_signal(tod, comm, masksampler, mapsampler, intervals)
# self.highpass_signal(tod, comm, intervals)
# Extend the gap between intervals to prevent sample pairs
# that cross the gap.
gap_min = np.int(self._lagmax) + 1
# Downsampled data requires longer gaps
gap_min_nsum = np.int(self._lagmax * self._nsum) + 1
offset, nsamp = tod.local_samples
gapflags = np.zeros_like(commonflags)
gapflags_nsum = np.zeros_like(commonflags)
for ival1, ival2 in zip(intervals[:-1], intervals[1:]):
gap_start = ival1.last + 1
gap_stop = max(gap_start + gap_min, ival2.first)
gap_stop_nsum = max(gap_start + gap_min_nsum, ival2.first)
if gap_start < offset + nsamp and gap_stop > offset:
gap_start = max(0, gap_start - offset)
gap_stop = min(offset + nsamp, gap_stop - offset)
gapflags[gap_start:gap_stop] = True
gap_stop_nsum = min(offset + nsamp, gap_stop_nsum - offset)
gapflags_nsum[gap_start:gap_stop_nsum] = True
# FIXME: This operator needs to handle situations where
# det1 and det2 are not on the same process. Then the
# check at the top of the loop can be removed.
for det1, det2 in pairs:
if det1 not in dets or det2 not in dets:
# User-specified pair is invalid
continue
signal1 = tod.local_signal(det1)
flags1 = tod.local_flags(det1, name=self._flags)
flags = flags1 & self._detmask != 0
signal2 = None
flags2 = None
if det1 != det2:
signal2 = tod.local_signal(det2)
flags2 = tod.local_flags(det2, name=self._flags)
flags[flags2 & self._detmask != 0] = True
flags[commonflags] = True
self.process_noise_estimate(
signal1,
signal2,
flags,
gapflags,
gapflags_nsum,
timestamps,
fsample,
comm,
fileroot,
det1,
det2,
intervals,
)
return
def highpass_signal(self, tod, comm, intervals):
""" Suppress the sub-harmonic modes in the TOD by high-pass
filtering.
"""
tm = Timer()
tm.start()
rank = 0
if comm is not None:
rank = comm.rank
if rank == 0:
print("High-pass-filtering signal", flush=True)
for det in tod.local_dets:
signal = tod.local_signal(det, name=self._signal)
flags = tod.local_flags(det, name=self._flags)
flags &= self._detmask
for ival in intervals:
ind = slice(ival.first, ival.last + 1)
sig = signal[ind]
flg = flags[ind]
trend = flagged_running_average(
sig, flg, self._lagmax, return_flags=False
)
sig -= trend
if comm is not None:
comm.barrier()
tm.stop()
if rank == 0:
tm.report("TOD high pass")
return
def subtract_signal(self, tod, comm, masksampler, mapsampler, intervals):
""" Subtract a signal estimate from the TOD and update the
flags for noise estimation.
"""
if mapsampler is None and masksampler is None:
return
tm = Timer()
tm.start()
rank = 0
if comm is not None:
rank = comm.rank
if rank == 0:
print("Subtracting signal", flush=True)
for det in tod.local_dets:
if det.endswith("-diff") and not self._pol:
continue
# if comm.rank == 0:
# print('Subtracting signal for {}'.format(det), flush=True)
# tod.cache.report()
epsilon = 0 # FIXME: Where can we get this for every detector?
eta = (1 - epsilon) / (1 + epsilon)
signal = tod.local_signal(det, name=self._signal)
flags = tod.local_flags(det, name=self._flags)
flags &= self._detmask
try:
quats = tod.local_pointing(det)
except AttributeError:
quats = None
if quats is None:
continue
iquweights = tod.local_weights(det)
for ival in intervals:
ind = slice(ival.first, ival.last + 1)
sig = signal[ind]
flg = flags[ind]
quat = quats[ind]
theta, phi = qa.to_position(quat)
iquw = iquweights[ind, :]
if masksampler is not None:
maskflg = masksampler.at(theta, phi) < 0.5
flg[maskflg] |= 255
if mapsampler is not None:
if self._pol:
bg = mapsampler.atpol(theta, phi, iquw)
else:
bg = mapsampler.at(theta, phi) * iquw[:, 0]
if self._calibrate_signal_estimate:
good = flg == 0
ngood = | np.sum(good) | numpy.sum |
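# The "regress, not just subtract" option mentioned in the docstring amounts
# to a one-parameter least-squares fit of the template to the signal; a
# sketch of that step (an assumption about the elided code):
def _regress_out(sig, template, good):
    gain = np.dot(sig[good], template[good]) / np.dot(template[good], template[good])
    return sig - gain * template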
# -*- coding: utf-8 -*-
"""Monte Carlo simulations of the Ising model using the Metropolis algorithm.
Copyright (c) Mason DataMaterials Group.
Distributed under the terms of the MIT License.
Notes
-----
Our code is an adaptation of the Ising model code example in chapter 7.7.1,
pages 318–319, of *Annotated Algorithms in Python* [ann_algo_python]_,
which was released under the BSDv3 License. The starting code remains under
that license, while all subsequent changes and new features are released under
the MIT license.
References
----------
.. [ann_algo_python] <NAME>, *Annotated Algorithms in Python: With
Applications in Physics, Biology, and Finance*, 1st ed. (Experts4solutions,
Lexington, KY, 2014).
"""
import logging
import math
import random
from pathlib import Path
import numpy as np
import pandas as pd
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2017, Mason DataMaterials Group"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "August 31, 2017"
logger = logging.getLogger(__name__)
class Ising(object):
"""Monte Carlo simulation of the Ising model."""
coords = []
def __init__(self, number_sites_along_xyz):
"""Set the number of sites in the cubic lattice.
Parameters
----------
        number_sites_along_xyz : int
            Sets the number of sites along each side of the cubic lattice.
            Defines a lattice with total sites equal to number_sites_along_xyz**3
"""
self.number_sites_along_xyz = number_sites_along_xyz
self.site_spin = [[[1 for x in range(number_sites_along_xyz)]
for y in range(number_sites_along_xyz)]
for z in range(number_sites_along_xyz)]
self.magnetization = number_sites_along_xyz**3
def __getitem__(self, site_index):
"""Return the spin value at the specified site index.
Uses periodic boundaries for neighbors outside of grid.
Parameters
----------
        site_index : tuple of int
            An (x, y, z) triple of site coordinates; periodic boundaries wrap
            coordinates that fall outside the grid.
"""
number_sites_along_xyz = self.number_sites_along_xyz
site_x, site_y, site_z = site_index
index_x = (site_x + number_sites_along_xyz) % number_sites_along_xyz
index_y = (site_y + number_sites_along_xyz) % number_sites_along_xyz
index_z = (site_z + number_sites_along_xyz) % number_sites_along_xyz
return self.site_spin[index_x][index_y][index_z]
def __setitem__(self, site_index, new_spin_value):
"""Set the spin value at the specified site index.
Uses periodic boundaries for neighbors outside of grid.
Parameters
----------
        site_index : tuple of int
            An (x, y, z) triple of site coordinates; periodic boundaries wrap
            coordinates that fall outside the grid.
new_spin_value : int
In the Ising model, the spin value is restricted to values of 1
or -1
"""
number_sites_along_xyz = self.number_sites_along_xyz
site_x, site_y, site_z = site_index
index_x = (site_x + number_sites_along_xyz) % number_sites_along_xyz
index_y = (site_y + number_sites_along_xyz) % number_sites_along_xyz
index_z = (site_z + number_sites_along_xyz) % number_sites_along_xyz
self.site_spin[index_x][index_y][index_z] = new_spin_value
def run_simulation(self, steps, temperature, external_field):
"""Run Monte Carlo simulation.
Parameters
----------
steps : int
Number of time steps to run the simulation.
        temperature : float
            System temperature in units of (check units)
        external_field : float
            External magnetic field in units of (check units)
Returns
-------
simulation_statistics
The mean and standard deviation of the magnetization at a given
temperature.
"""
logger.info("Starting simulation run for t=%s, h=%s, steps=%s",
temperature, external_field, steps)
magnetization_history = self._monte_carlo_simulation(
steps=steps, temperature=temperature,
external_field=external_field)
        mean_magnetization = np.mean(magnetization_history)
import numpy as np
import subprocess
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
import scipy.stats
from .configuring_parameters import ConfiguringParameters
from .utils import *
from . import diamonds_functions as diamonds
from . import asteroseismic_functions as astero
__all__ = ['FamedStar',]
class FamedStar(object):
"""
Generic class to represent a star processed by the FAMED pipeline.
Longer description
Parameters
----------
catalog_id : str
        Catalogue ID of the star (e.g. 'KIC' for Kepler or 'TIC' for TESS).
star_id : str
ID of the star as a string (e.g. '0012008916' or '7037405').
teff : float
Effective temperature of the star in Kelvin.
"""
def __init__(self, catalog_id, star_id, teff):
self.catalog_id = catalog_id
self.star_id = star_id
self.teff = teff
self.cp = ConfiguringParameters()
peakbagging_results_dir = self.cp.diamonds_path/'PeakBagging'/'results'
self.star_dir = peakbagging_results_dir/(self.catalog_id+self.star_id)
# Read in the fitted background parameters
background_results_dir = self.cp.diamonds_path/'Background'/'results'
if self.cp.print_on_screen:
if self.cp.external_background_results_dir != '-99':
print(' Using an external background fit provided by the user')
self.bgp = diamonds.get_background(self.catalog_id, self.star_id, background_results_dir, self.cp.background_run_number, self.cp.external_background_results_dir, self.cp.external_background_filename_suffix)
    ### Methods to be used by multiple modalities (e.g. GLOBAL and CHUNK)
def compute_acf_dnu(self, scaling_dnu, par_hist, asef_hist):
"""
        Compute `dnu` from the autocorrelation of the ASEF.
This function computes the ACF**2 of the multi-modal sampling obtained
with DIAMONDS to evaluate an estimate of the large frequency separation
`dnu`. The final value is estimated from a Gaussian fit to the ACF**2.
Parameters
----------
scaling_dnu : float
The large frequency separation as estimated from the scaling laws.
        par_hist : array
            The rebinned frequency array (e.g. as returned by compute_asef).
        asef_hist : array
            The ASEF values corresponding to par_hist.
Returns
-------
acf_dnu : float
            The peak of the Gaussian curve fit to the ACF**2 of the ASEF.
interpolated_dnu : array
Interpolated array of the `dnu` values. Used primarily for plotting.
interpolated_acf : array
            Interpolated array of the ACF**2. Used primarily for plotting.
"""
# Set the limits based on scaling_dnu
dnu_range_side = scaling_dnu * self.cp.dnu_acf_range_side
top_dnu = scaling_dnu + dnu_range_side
bottom_dnu = scaling_dnu - dnu_range_side
# Autocorrelate the ASEF
freqbin = par_hist[1] - par_hist[0]
dnu_range_bins = round((top_dnu-bottom_dnu)/freqbin)+1
bottom_dnu_bins = round(bottom_dnu/freqbin)
lag = np.arange(dnu_range_bins) + bottom_dnu_bins
temp = asef_hist-np.mean(asef_hist)
norm = np.sum(temp**2)
result = np.correlate(temp,temp,mode='full')/norm
result = result[len(result)//2:][lag]
result = result-np.min(result)
# Interpolate the results
n_interpol = 101
interpolated_dnu = np.arange(n_interpol)/(n_interpol-1)*(top_dnu - bottom_dnu) + bottom_dnu
interpolated_acf = interp1d(lag*freqbin,result,'cubic',fill_value='extrapolate',bounds_error=False)(interpolated_dnu)
j = np.argmax(interpolated_acf)
best_acf = interpolated_acf[j]
acf_dnu = interpolated_dnu[j]
# Perform a Gaussian fit to the ACF^2 from the smoothed PSD
x = interpolated_dnu
y = interpolated_acf**2
coeff,cov = curve_fit(gaussian_func,x,y,p0=[max(y),acf_dnu,acf_dnu/10,min(y),0,0])
acf_dnu = coeff[1]
return acf_dnu, interpolated_dnu, interpolated_acf
def compute_asef(self, param, distr, n_bins=100):
"""
Compute an Averaged Shifted Envelope Function (ASEF)
This function computes an Averaged Shifted Envelope Function (ASEF)
from an input sampling distribution.
Parameters
----------
param : array
The parameter over which we are computing the ASEF. In the case of
FAMED, this is the frequency.
distr : array
The distribution of the parameter to compute the ASEF for. In the
case of FAMED, this is the nested sampling iterations.
n_bins : int, default: 100
The number of bins used to compute the Average Shifted Histogram.
Returns
-------
param : array
The rebinned parameter distribution.
distr : array
The rebinned ASEF of the original distribution.
"""
min_par = min(param)
max_par = max(param)
binwidth = (max_par-min_par)/n_bins
# Now perform average shifting for ASH
n_sim = 20 # Number of combined histograms
simsize = binwidth/n_sim
distr_rebin = np.zeros(shape=(n_sim,n_bins))
param_rebin = np.zeros(shape=(n_sim, n_bins))
for k in range(n_sim):
# Define bin-edges.
bins = [min_par + j*binwidth + k*simsize for j in range(n_bins+1)]
rebin = scipy.stats.binned_statistic(
x = param,
values = distr,
statistic='max',
bins = bins,
)
bin_edges = rebin[1]
bin_centres = (bin_edges[1:] + bin_edges[:-1]) / 2
distr_rebin[k,:] = np.nan_to_num(rebin[0])
param_rebin[k,:] = bin_centres
param = param_rebin.mean(axis=0)
distr = distr_rebin.mean(axis=0)
return param, distr
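    # Usage sketch (variable names are hypothetical): given an array of
    # sampled frequencies par0 and their nested-iteration values nest_iter,
    #
    #     par_hist, asef_hist = self.compute_asef(par0, nest_iter, n_bins=100)
    #
    # yields the histogram inputs expected by compute_acf_dnu above.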
def evaluate_sampling_frequencies(self, par0, par_hist, freq, spsd, maximum, range_maximum):
"""
Compute frequencies and uncertainties from the multi-modal fit.
This function computes the frequencies and their uncertainties from
the multi-modal fit of DIAMONDS. The extracted frequencies do not
necessarily correspond to real oscillation peaks but they correspond to
the extracted local maxima from the ASEF histogram. The function also
provides the sampling counts, SPSD local maximum for each frequency, as
well as an additional improvement to the definition of the frequency
range for each ASEF peak.
Parameters
----------
par0 : array
The sampled frequency from DIAMONDS multi-modal fit.
par_hist : array
The rebinned sampled frequency as returned by the ASEF
freq : array
Frequency axis of the smoothed PSD.
spsd : array
The smoothed PSD.
maximum : array
The length N array of frequencies where maxima occur in the ASEF as
returned by the hill climbing algorithm.
range_maximum : array
The 2xN array with the range of frequency to consider for evaluating
each maximum.
Returns
-------
sampled_estimates : dict
A dictionary containing the following keys: `freq1` for the
frequency, `freq1_sig` for the uncertainty, `sampling_counts` for
the total nested iterations values, and `spsd_maximum` for the
maxima of the smoothed PSD.
"""
n_maxima = len(maximum)
freq1 = np.zeros(n_maxima)
freq_sig1 = np.zeros(n_maxima)
sampling_counts = np.zeros(n_maxima)
spsd_maximum = np.zeros(n_maxima)
nest_iter = np.arange(len(par0))
        range_maximum_old = range_maximum
freqbin = freq[1]-freq[0]
for i in range(0, n_maxima):
# Consider the frequency range around the local maximum
iterations = 0
while iterations < self.cp.max_iterations_frequency + 1:
upper_bound = range_maximum[1,i]
lower_bound = range_maximum[0,i]
tmp_freq_range = np.where((freq >= lower_bound) & (freq <= upper_bound))[0]
# Make sure that within each range, at least one frequency
# point is found.
while len(tmp_freq_range)<1:
upper_bound += freqbin/2
lower_bound -= freqbin/2
tmp_freq_range = np.where((freq >= lower_bound) & (freq <= upper_bound))[0]
range_maximum[1,i] = upper_bound
range_maximum[0,i] = lower_bound
tmp_range = np.where((par0 <= upper_bound) & (par0 >= lower_bound))[0]
par0_range = par0[tmp_range]
spsd_range = spsd[tmp_freq_range]
# Count the total of the nested iteration values falling in this
# maximum bin. Note this is not the number of nested sampling
# points, but it includes the actual nested iteration value from
# each point (which can be up to several thousands for an
# individual sampling point). In this way it is possible to
# better weight the local maxima by the actual level of
# likelihood that they have reached during the sampling.
sampling_counts[i] = np.sum(nest_iter[tmp_range])
# Save the maximum smoothed PSD in the region of this maximum
spsd_maximum[i] = np.max(spsd_range)
# Weighted by nested iteration value
freq1[i] = np.sum(par0_range*tmp_range**2)/np.sum(tmp_range**2)
freq_sig1[i] = np.sqrt(np.sum((par0_range-freq1[i])**2*tmp_range**2)/np.sum(tmp_range**2))
if iterations == 1:
break
# Improve frequency ranges for computation of uncertainties and
# evaluation of the number of sampling points. Do not exceed
# ranges by more than estimated sigma * X times (usually 2)
if upper_bound > freq1[i]+freq_sig1[i]*self.cp.max_sigma_range:
range_maximum[1,i] = freq1[i]+freq_sig1[i]*self.cp.max_sigma_range
if lower_bound < freq1[i]-freq_sig1[i]*self.cp.max_sigma_range:
range_maximum[0,i] = freq1[i]-freq_sig1[i]*self.cp.max_sigma_range
# Try to make ranges extend by at least sigma * Y times (usually 1) on each side
left_freq = freq1[i]-freq_sig1[i]*self.cp.min_sigma_range
right_freq = freq1[i]+freq_sig1[i]*self.cp.min_sigma_range
if lower_bound > left_freq:
if i == 0:
if left_freq <= np.min(par_hist):
range_maximum[0,i] = np.min(par_hist)
if left_freq > np.min(par_hist):
range_maximum[0,i] = left_freq
else:
if left_freq <= range_maximum[1,i-1]:
range_maximum[0,i] = range_maximum[1,i-1]
if left_freq > range_maximum[1,i-1]:
range_maximum[0,i] = left_freq
if upper_bound < right_freq:
if i == n_maxima-1:
if right_freq >= np.max(par_hist):
                            range_maximum[1,i] = np.max(par_hist)
#####
# This is the file used to run the CNN. You can choose between the NVIDIA
# CNN, the SIO CNN and the speed model (see the flag n below).
###
from functions import (detect_edges, detect_line_segments, plot_line_segments,
show_image, extract_data)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import csv
import pandas as pd
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
n=3 #1 for nvidia cnn, 2 for sio cnn, 3 for speed model
#keras.models.load_model('model1.hf')
#extract data from folders.
data_dir1 = '../../Data/training_data/training_data'
#first we extract images
file_list = np.asarray(os.listdir(data_dir1))
a=0
x1 = []
for f in file_list:
frame = cv2.imread(data_dir1 + '/' + f)
x1.append(frame)
#print(a)
a+=1
x1=np.asarray(x1)
#print(x1.shape)
#now extract labels
labels = np.genfromtxt('../../Data/training_norm.csv',delimiter = ',')
y1 = labels[1:,1:]
#print(y1.shape)
data_dir2 = '../../Data/captureOVAL-28_02_2020'
x2,y2 = extract_data(data_dir2)
x2 = np.asarray(x2)
#print(x2.shape)
y2[:,0] = (y2[:,0] -50)/80
y2[:,1] = (y2[:,1])/35
#print(y2.shape)
data_dir3 = '../../Data/captureOVAL2-28_02_2020'
x3,y3 = extract_data(data_dir3)
x3 = np.asarray(x3)
#print(x3.shape)
y3[:,0] = (y3[:,0] -50)/80
y3[:,1] = (y3[:,1])/35
#print(y3.shape)
data_dir4 = '../../Data/captureOVAL03_03_2020'
x4,y4 = extract_data(data_dir4)
x4 = np.asarray(x4)
#print(x4.shape)
y4[:,0] = (y4[:,0] -50)/80
y4[:,1] = (y4[:,1])/35
#print(y4.shape)
data_dir5 = '../../Data/human_stopped_data'
x5,y5 = extract_data(data_dir5)
x5 = np.asarray(x5)
#!/usr/bin/env python
# PROGRAM: plot_sst.py
# ----------------------------------------------------------------------------------
# Version 0.18
# 19 August, 2019
# michael.taylor AT reading DOT ac DOT uk
# PYTHON DEBUGGER CONTROL:
#------------------------
# import os; os._exit(0)
# import ipdb
# ipdb.set_trace()
import os.path
import optparse
from optparse import OptionParser
import sys
import numpy as np
import xarray
import pandas as pd
from pandas import Series, DataFrame, Panel
import seaborn as sns; sns.set(style="darkgrid")
import datetime
import matplotlib
import matplotlib.pyplot as plt; plt.close("all")
#import typhon
#from typhon.plots import plot_bitfield
#cmap = 'tab20c' # https://matplotlib.org/users/colormaps
def calc_median(counts,bins):
"""
# -------------------------------
# CALCULATE MEDIUM FROM HISTOGRAM
# -------------------------------
# M_estimated ~ L_m + [ ( N/2 - F_{m-1} ) / f_m] * c
#
# where,
#
# L_m =lower limit of the median bar
# N = is the total number of observations
# F_{m-1} = cumulative frequency (total number of observations) in all bars below the median bar
# f_m = frequency of the median bar
# c = median bar width
"""
M = 0
counts_cumsum = counts.cumsum()
counts_half = counts_cumsum[-1]/2.0
for i in np.arange(0,bins.shape[0]-1):
counts_l = counts_cumsum[i]
counts_r = counts_cumsum[i+1]
if (counts_half >= counts_l) & (counts_half < counts_r):
c = bins[1]-bins[0]
L_m = bins[i+1]
F_m_minus_1 = counts_cumsum[i]
f_m = counts[i+1]
M = L_m + ( (counts_half - F_m_minus_1) / f_m ) * c
return M
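# Worked example (illustrative): the function expects counts and bins of
# equal length, as in the calls below. For a symmetric histogram with
# counts [1, 2, 1] over equally spaced bins [0, 1, 2], the interpolated
# median falls in the middle of the central bin:
#
#     >>> calc_median(np.array([1, 2, 1]), np.array([0., 1., 2.]))
#     1.5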
def plot_n_sst(times,n_sst_q3,n_sst_q4,n_sst_q5):
"""
# ---------------------------------------
# PLOT CUMULATIVE SST OBSERVATION DENSITY
# ---------------------------------------
"""
ocean_area = 361900000.0
t = np.array(times, dtype=np.datetime64)
years = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D') / 365.0
Q3 = pd.Series(n_sst_q3, index=times).fillna(0) / ocean_area / years
Q4 = pd.Series(n_sst_q4, index=times).fillna(0) / ocean_area / years
Q5 = pd.Series(n_sst_q5, index=times).fillna(0) / ocean_area / years
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.plot(times,df['QL=4 & 5'].cumsum(), drawstyle='steps')
plt.plot(times,df['QL=3'].cumsum(), drawstyle='steps')
plt.tick_params(labelsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
title_str = ' ' + 'QL=3:max=' + "{0:.5f}".format(df['QL=3'].cumsum().max()) + ' ' + 'QL=4 & 5:max=' + "{0:.5f}".format(df['QL=4 & 5'].cumsum().max())
print(title_str)
plt.legend(loc='best')
plt.savefig('n_sst.pdf')
# plt.savefig('n_sst.png', dpi=600)
# plt.savefig('n_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_lat(lat_vec,n_sst_q3_lat,n_sst_q4_lat,n_sst_q5_lat):
"""
# ------------------------------------------
# PLOT SST OBSERVATION DENSITY WITH LATITUDE
# ------------------------------------------
"""
interpolation = np.arange(-90,90,1)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q3_lat), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q4_lat), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q5_lat), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df['QL=3 & 4 & 5'] = df['QL=3'] + df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation, df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation, df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation, df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
plt.plot(interpolation, df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([-90,90])
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(-90, 90, 7))
plt.tick_params(labelsize=12)
plt.xlabel("Latitude / $\mathrm{\degree N}$", fontsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
plt.legend(loc='best')
plt.savefig('n_sst_lat.pdf')
# plt.savefig('n_sst_lat.png', dpi=600)
# plt.savefig('n_sst_lat.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sst(sst_midpoints,sst_q3_hist,sst_q4_hist,sst_q5_hist):
"""
# ------------------------------
# PLOT HISTOGRAM OF SST + MEDIAN
# ------------------------------
"""
# interpolation = np.arange(260.05,319.95,0.1) # original bin midpoints
i = np.arange(260,320,0.1) # bin edges
n = len(i)
m = 1.0
q3 = m * pd.Series(np.interp(i,sst_midpoints,sst_q3_hist), index=i)
q4 = m * pd.Series(np.interp(i,sst_midpoints,sst_q4_hist), index=i)
q5 = m * pd.Series(np.interp(i,sst_midpoints,sst_q5_hist), index=i)
dq = pd.DataFrame({'QL=3':q3, 'QL=4':q4, 'QL=5':q5})
dq['QL=4 & 5'] = 0.5 * (dq['QL=4'] + dq['QL=5'])
# dq = dq.mask(np.isinf(df))
M3 = calc_median(dq['QL=3'].values,i[0:n])
M4_5 = calc_median(dq['QL=4 & 5'].values,i[0:n])
interpolation = np.arange(260,320,1) # 10x original resolution
n = len(interpolation)
multiplier = 10.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation,df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(interpolation,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([260,310])
plt.tick_params(labelsize=12)
plt.xlabel("SST / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ K^{-1}}$", fontsize=12)
title_str = 'SST: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sst.pdf')
# plt.savefig('hist_sst.png', dpi=600)
# plt.savefig('hist_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sensitivity(sensitivity_midpoints,sensitivity_q3_hist,sensitivity_q4_hist,sensitivity_q5_hist):
"""
# ------------------------------------------------
# PLOT HISTOGRAM OF RETRIEVAL SENSITIVITY + MEDIAN
# ------------------------------------------------
"""
# interpolation = np.arange(0.005,1.995,0.01) # original bin midpoints
interpolation = np.arange(0,2,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(100.0*interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(100.0*interpolation,df['QL=3'], step="post", alpha=0.4)
plt.plot(100.0*interpolation,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(100.0*interpolation,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([85,110])
plt.tick_params(labelsize=12)
plt.xlabel("Retrieval sensitivity / $\mathrm{\%}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ {\%}^{-1} }$", fontsize=12)
title_str = 'Sensitivity: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sensitivity.pdf')
# plt.savefig('hist_sensitivity.png', dpi=600)
# plt.savefig('hist_sensitivity.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty(total_uncertainty_midpoints,total_uncertainty_q3_hist,total_uncertainty_q4_hist,total_uncertainty_q5_hist):
"""
# --------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN
# --------------------------------------------
"""
# interpolation = np.arange(0.005,3.995+0.01,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
title_str = 'Uncertainty: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty.pdf')
# plt.savefig('hist_total_uncertainty.png', dpi=600)
# plt.savefig('hist_total_uncertainty.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty2(total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr,total_uncertainty_q4_hist_avhrr,total_uncertainty_q5_hist_avhrr,total_uncertainty_q3_hist_atsr,total_uncertainty_q4_hist_atsr,total_uncertainty_q5_hist_atsr):
"""
# --------------------------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN FOR AVHRR VS ATSR
# --------------------------------------------------------------
"""
# interpolation = np.arange(0.005,3.995,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr), index=interpolation)
Q4_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_avhrr), index=interpolation)
Q5_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_avhrr), index=interpolation)
df_avhrr = pd.DataFrame({'QL=3':Q3_avhrr, 'QL=4':Q4_avhrr, 'QL=5':Q5_avhrr})
# df_avhrr['QL=4 & 5'] = 0.5 * (df_avhrr['QL=4'] + df_avhrr['QL=5'])
df_avhrr['QL=4 & 5'] = df_avhrr['QL=5']
# df_avhrr = df_avhrr.mask(np.isinf(df_avhrr))
Q3_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_atsr), index=interpolation)
Q4_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_atsr), index=interpolation)
Q5_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_atsr), index=interpolation)
df_atsr = pd.DataFrame({'QL=3':Q3_atsr, 'QL=4':Q4_atsr, 'QL=5':Q5_atsr})
df_atsr['QL=4 & 5'] = 0.5 * (df_atsr['QL=4'] + df_atsr['QL=5'])
# df_atsr = df_atsr.mask(np.isinf(df_atsr))
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df_avhrr['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_avhrr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_avhrr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'AVHRR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_avhrr.pdf')
# plt.savefig('hist_total_uncertainty_avhrr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_avhrr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=3'], step="post", alpha=0.4)
plt.plot(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], drawstyle='steps-post')
plt.plot(total_uncertainty_midpoints,df_atsr['QL=3'], drawstyle='steps-post')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_atsr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_atsr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'ATSR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_atsr.pdf')
# plt.savefig('hist_total_uncertainty_atsr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_atsr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def calc_n_sst_timeseries(satellites):
"""
# ---------------------------------------------------------------
# CALC MEAN OF TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# ---------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_all = pd.DataFrame()
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_all = df_all.append(df,ignore_index=True)
satellites_avhrr = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_avhrr = pd.DataFrame()
for i in range(0,len(satellites_avhrr)):
filename = satellites_avhrr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_avhrr = df_avhrr.append(df,ignore_index=True)
satellites_atsr = ['AATSR','ATSR1','ATSR2']
df_atsr = pd.DataFrame()
for i in range(0,len(satellites_atsr)):
filename = satellites_atsr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_atsr = df_atsr.append(df,ignore_index=True)
return df_all, df_avhrr, df_atsr
def plot_n_sst_timeseries(satellites):
"""
# -------------------------------------------------------
# PLOT TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# -------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
lab = []
ncolors = len(satellites)
ax1.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
# df['Sum'] = df['Q4'].fillna(0) + df['Q5'].fillna(0)
# df['Sum_mean'] = df['Sum'].resample("1d").sum().fillna(0).rolling(window=31, min_periods=1).median()
# df['Sum_mean'].plot(ax=ax1)
lab.append(labels[i])
ax1.plot(times, df['Sum'], '.', markersize=0.2)
ax1.set_ylim([0,18])
print(labels[i] + "," + str(df['Sum'].mean()) + "," + str(df['Sum'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=4 & 5'
ax1.set_title(title_str, fontsize=10)
lab = []
ncolors = len(satellites)
ax2.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3})
# df['Q3_mean'] = df['Q3'].resample("1d").sum().rolling(window=31, min_periods=1).median()
# df['Q3_mean'].plot(ax=ax2)
lab.append(labels[i])
ax2.plot(times, df['Q3'], '.', markersize=0.2)
ax2.set_ylim([0,18])
print(labels[i] + "," + str(df['Q3'].mean()) + "," + str(df['Q3'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=3'
ax2.set_title(title_str, fontsize=10)
fig.legend(lab, fontsize=8, loc=7, markerscale=20, scatterpoints=5)
fig.subplots_adjust(right=0.8)
fig.text(0.01, 0.5, 'Observation density / $\mathrm{km^{-2} \ yr^{-1}}$', va='center', rotation='vertical')
plt.savefig('n_sst_timeseries.pdf')
# plt.savefig('n_sst_timeseries.png', dpi=600)
# plt.savefig('n_sst_timeseries.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_boxplots(satellites):
"""
# --------------------------------------------------------------
# PLOT YEARLY BOXPLOTS FROM DAILY OBSERVATION DENSITY PER SENSOR
# --------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
fig, ax = plt.subplots(figsize=(12,5))
ts = pd.Series(df['Sum'].values, index=times)
sns.boxplot(ts.index.month, ts, ax=ax)
title_str = 'QL=4 & 5:' + labels[i]
ax.set_ylabel('Observation density / $\mathrm{km^{-2} \ yr^{-1}}$')
ax.set_title(title_str, fontsize=10)
file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.pdf'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.png'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.eps'
plt.savefig(file_str)
# plt.savefig(file_str, dpi=600)
# plt.savefig(file_str, format='eps', rasterized=True, dpi=1200)
plt.close("all")
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3})
fig, ax = plt.subplots(figsize=(12,5))
ts = pd.Series(df['Q3'].values, index=times)
sns.boxplot(ts.index.month, ts, ax=ax)
title_str = 'QL=3:' + labels[i]
ax.set_ylabel('Observation density / $\mathrm{km^{-2} \ yr^{-1}}$')
ax.set_title(title_str, fontsize=10)
file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.pdf'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.png'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.eps'
plt.savefig(file_str)
# plt.savefig(file_str, dpi=600)
# plt.savefig(file_str, format='eps', rasterized=True, dpi=1200)
plt.close("all")
def calc_lat_fraction():
"""
# ---------------------------------------------------------------
# EXTRACT OCEAN FRACTION WITH LATITUDE FROM L4 OSTIA LANDSEA MASK
# ---------------------------------------------------------------
"""
# mask:source = "NAVOCEANO_landmask_v1.0 EUMETSAT_OSI-SAF_icemask ARCLake_lakemask"
# mask:comment = "water land lake ice"
# mask:flag_masks = 1b, 2b, 4b, 8b, 16b
# mask:summary = "OSTIA L4 product from the ESA SST CCI project, produced using OSTIA reanalysis sytem v3.0"
ds = xarray.open_dataset('landsea_mask.nc')
x = ds.lon
y = ds.lat
z = ds.mask
water = z==1
land = z==2
water_ice = z==9
# water only = 52.42%
# land only = 33.67%
# water + ice = 13.91%
f = 1 - (np.sum(land[0,:,:],axis=1) / len(x)*1.)
lat_vec = y
lat_fraction = f
# exec(open('plot_landsea_mask.py').read())
return lat_vec, lat_fraction
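# Usage sketch (assumes 'landsea_mask.nc' is present, as in the function
# above): the returned latitude grid and ocean fraction feed directly into
# load_data(), e.g.
#
#     lat_vec, lat_fraction = calc_lat_fraction()
#     load_data(lat_vec, lat_fraction)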
def load_data(lat_vec, lat_fraction):
#
# Rescale ocean_area to total area of surface in each latitude zone
#
ocean_area = 361900000.0 # ETOPO1: km2
R = 6371.0088 # km
    # Area of a spherical cap north of a line of latitude: A = 2*pi*R*h,
    # where R is the radius of the Earth and h is the perpendicular distance
    # from the plane containing the line of latitude to the pole. By
    # trigonometry, h = R*(1-sin(lat)), so the area north of a latitude is
    # A = 2*pi*R^2*(1-sin(lat)).
    # The area between two lines of latitude is the difference of two such
    # caps: A = |2*pi*R^2*(1-sin(lat2)) - 2*pi*R^2*(1-sin(lat1))|
    #         = 2*pi*R^2*|sin(lat1) - sin(lat2)|.
    # The area of a lat-lon rectangle is proportional to the difference in
    # longitudes, and the cap difference above spans 360 degrees, so:
    # A = 2*pi*R^2*|sin(lat1)-sin(lat2)|*|lon1-lon2|/360
    #   = (pi/180)*R^2*|sin(lat1)-sin(lat2)|*|lon1-lon2|
dlat = 0.05
A = []
N = len(lat_vec)
for i in range(N):
        dA = 2. * np.pi * R**2.0 * np.absolute(np.sin(np.pi/180 * (lat_vec[i]+dlat/2)) - np.sin(np.pi/180 * (lat_vec[i]-dlat/2)))
        A.append(dA)
surface_vec = np.array(A)
ocean_vec = surface_vec * np.array(lat_fraction)
FPE = 100. * (1.0 - np.sum(ocean_vec) / ocean_area)
print('FPE(ETOPO1,ocean_area)=', FPE)
fig, ax = plt.subplots()
plt.plot(lat_vec, surface_vec, label='surface area')
plt.plot(lat_vec, ocean_vec, label='ocean')
plt.legend()
plt.xlabel('Latitude / degrees')
plt.ylabel(r'Area / $km^{2}$')
# title_str = "ETOPO1 ocean_area=" + "{0:.3e}".format(ocean_area) + " calculated=" + "{0:.3e}".format(np.sum(ocean_vec))
# plt.title(title_str)
file_str = "ocean_area.png"
fig.tight_layout()
plt.savefig('ocean_area.pdf')
# plt.savefig('ocean_area.png', dpi=600)
# plt.savefig('ocean_area.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
satellites = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G','AATSR','ATSR1','ATSR2']
df = []
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
dsi = xarray.open_dataset(filename)
df.append(dsi)
dsi = []
ds = xarray.concat(df, dim='time')
df = []
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
dates = []
days = []
times_duplicates = []
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
idx = []
n_sst_q3 = Q3_duplicates.groupby(Q3_duplicates.index).sum()
n_sst_q4 = Q4_duplicates.groupby(Q4_duplicates.index).sum()
n_sst_q5 = Q5_duplicates.groupby(Q5_duplicates.index).sum()
Q3_duplicates = []
Q4_duplicates = []
Q5_duplicates = []
#
# CALCULATE CLEAR SKY FRACTION
#
# water only = 52.42% / land only = 33.67% / water + ice = 13.91% / non-land = 66.33%
n_ocean = (0.5242 + 0.1391) * 3600 * 7200 * len(times)
n_q3 = np.sum(n_sst_q3)
n_q4 = np.sum(n_sst_q4)
n_q5 = np.sum(n_sst_q5)
clearsky_q3 = n_q3 / n_ocean
clearsky_q4 = n_q4 / n_ocean
clearsky_q5 = n_q5 / n_ocean
#
# SLICE BY LATITUDE
#
# NB: change: * years rather than / years
n_sst_q3_lat = np.sum(ds['n_sst_q3_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
n_sst_q4_lat = np.sum(ds['n_sst_q4_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
n_sst_q5_lat = np.sum(ds['n_sst_q5_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
gd_q3 = np.isfinite(np.array(n_sst_q3_lat))
gd_q4 = np.isfinite(np.array(n_sst_q4_lat))
gd_q5 = np.isfinite(np.array(n_sst_q5_lat))
n_sst_q3_lat_mean = np.array(n_sst_q3_lat)[gd_q3].mean()
n_sst_q4_lat_mean = np.array(n_sst_q4_lat)[gd_q4].mean()
n_sst_q5_lat_mean = np.array(n_sst_q5_lat)[gd_q5].mean()
print('n_sst_q3_lat_mean=', n_sst_q3_lat_mean)
print('n_sst_q4_lat_mean=', n_sst_q4_lat_mean)
print('n_sst_q5_lat_mean=', n_sst_q5_lat_mean)
#
# CONCATENATE HISTOGRAMS
#
sst_midpoints = ds['sst_midpoints']
sst_q3_hist = 100.0 * np.sum(ds['sst_q3_hist'],axis=0) / np.sum(np.sum(ds['sst_q3_hist'],axis=0))
sst_q4_hist = 100.0 * np.sum(ds['sst_q4_hist'],axis=0) / np.sum(np.sum(ds['sst_q4_hist'],axis=0))
sst_q5_hist = 100.0 * np.sum(ds['sst_q5_hist'],axis=0) / np.sum(np.sum(ds['sst_q5_hist'],axis=0))
sensitivity_midpoints = ds['sensitivity_midpoints']
sensitivity_q3_hist = 100.0 * np.sum(ds['sensitivity_q3_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q3_hist'],axis=0))
sensitivity_q4_hist = 100.0 * np.sum(ds['sensitivity_q4_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q4_hist'],axis=0))
sensitivity_q5_hist = 100.0 * np.sum(ds['sensitivity_q5_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q5_hist'],axis=0))
total_uncertainty_midpoints = ds['total_uncertainty_midpoints']
    total_uncertainty_q3_hist = 100.0 * np.sum(ds['total_uncertainty_q3_hist'],axis=0) / np.sum(np.sum(ds['total_uncertainty_q3_hist'],axis=0))
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"Function" submodule
This sub-package contains some functions required for the "Hamiltonian"
sub-package.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
from scipy import constants
from MajoranaNanowires.third_functions import pfaffian as pf
#%% ############################# Functions
#%%
def FermiDirac(E,kT,mu=0):
"""
Computes the Fermi-Dirac distribution.
Parameters
----------
E: scalar or arr
Energies.
kT: scalar
Temperature (in units of energy).
mu: scalar or arr
Fermi energy.
Returns
-------
result: scalar or arr
Fermi-Dirac distribution for the given energies.
"""
np.seterr(over='ignore')
np.seterr(divide='ignore')
return (1/(1+np.exp((E-mu)/kT)))
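# Quick check (illustrative): at E = mu the occupation is exactly one half,
# independently of the temperature:
#
#     >>> FermiDirac(E=0.0, kT=1.0, mu=0.0)
#     0.5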
#%%
def density_TF(phi,kT=0,E_F=0,material='InAs',band='conduction',Vz=0):
"""
Computes the charge density of a 3D (free) electron gas in the Thomas-Fermi
approximation.
Parameters
----------
phi: scalar or arr
Electrostatic energy.
kT: scalar
Temperature (in units of energy).
E_F: scalar or arr
Fermi energy.
material: str or dic
            Material for which the density is evaluated. For a general material,
'material' is a dictionary with arguments m_eff (conduction
effective mass), m_eff_hh (heavy hole effective mass), m_eff_lh
(light hole effective mass), and E_gap (semiconductor gap). These
parameters are already saved in this function for InAs and InSb,
which can be chosen by choosing material='InAs' or 'InSb',
            respectively.
band: str
Whether to include 'conduction', 'valence' or 'both' bands in the
calculations.
Vz: scalar
Zeeman splitting.
Returns
-------
den: scalar or arr
Charge density in the Thomas-Fermi approximation for the given
electrostatic energies.
"""
np.seterr(invalid='ignore')
if material=='InAs':
m_eff=0.023
m_eff_hh=0.41
m_eff_lh=0.026
E_gap=418
elif material=='InSb':
m_eff=0.015
m_eff_hh=0.43
m_eff_lh=0.015
E_gap=170
else:
        if 'E_gap' in material:
            m_eff, m_eff_hh, m_eff_lh, E_gap = material['m_eff'], material['m_eff_hh'], material['m_eff_lh'], material['E_gap']
        else:
            m_eff = material['m_eff']
if band=='conduction':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_e,0)
elif band=='valence':
if Vz==0:
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
else:
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den=np.nan_to_num(den_hh+den_lh,0)
elif band=='both':
if Vz==0:
den_e=-1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F)*1e-3*constants.e*FermiDirac(-phi-E_F,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(3*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
else:
den_e=-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F+Vz)*1e-3*constants.e*FermiDirac(-phi-E_F-Vz,kT))/constants.hbar)**3*1e-27
den_e=den_e-1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff*constants.m_e*np.abs(phi+E_F-Vz)*1e-3*constants.e*FermiDirac(-phi-E_F+Vz,kT))/constants.hbar)**3*1e-27
den_e=np.nan_to_num(den_e,0)
den_hh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_hh=den_hh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_hh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_lh=1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F-Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap+Vz,kT))/constants.hbar)**3*1e-27
den_lh=den_lh+1.0/(6*constants.pi**2)*(np.sqrt(2*m_eff_lh*constants.m_e*np.abs(-phi-E_gap-E_F+Vz)*1e-3*constants.e*FermiDirac(phi+E_F+E_gap-Vz,kT))/constants.hbar)**3*1e-27
den_h=np.nan_to_num(den_hh+den_lh,0)
den=den_e+den_h
return (den)
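# Hypothetical usage sketch (placeholder values; phi and E_F are in meV,
# consistent with the 1e-3*constants.e conversion factors above):
#
#     phi = np.linspace(-100, 100, 201)
#     den = density_TF(phi, kT=1.0, E_F=0.0, material='InAs', band='conduction')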
#%% ############################# Array manipulation
#%%
def order_eig(E,U=0,sparse='yes',BdG='yes'):
"""
    Order the eigenvalues (and eigenvectors) from smaller to larger. If
    BdG=='yes' and sparse=='yes', it also ensures that there are as many
    positive eigenvalues as negative ones.
Parameters
----------
E: arr
Eigenvalues.
U: arr
Eigenvectors.
sparse: {'yes','no'}
Whether the eigenspectrum has been computed from a sparse matrix.
BdG: {'yes','no'}
Whether the eigenspectrum must have BdG symmetry or not.
Returns
-------
E, U: arrs
Eigenspectrum ordered from smaller to larger eigenvalues.
"""
n_eig=len(E)
if np.isscalar(U):
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
return (idx)
else:
if BdG=='yes':
if sparse=='yes':
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
if (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==1):
E[n_eig-1]=-E[n_eig-2]
elif (np.abs(E[0]+E[n_eig-1])>0.00001)and(np.sign(E[0]+E[n_eig-1])==-1):
E[0]=-E[1]
idx = np.argsort(E)
E = E[idx]
U = U[:,idx]
return (E),(U)
#%%
def length(vec):
"""
    Length of a given vector. If vec is a scalar, its length is 1.
Parameters
----------
vec: scalar or arr
Input vector
Returns
-------
length: int
        Length of vec. If vec is a scalar, its length is 1.
"""
if np.ndim(vec)==0:
length=1
else:
length=len(vec)
return length
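# Examples (illustrative):
#
#     >>> length(3.0)
#     1
#     >>> length(np.zeros(5))
#     5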
#%%
def diagonal(N,k=0,init=0,step=1):
"""
    Indices of some diagonal of a given matrix. It is more efficient than its
numpy counterpart.
Parameters
----------
N: int
Length of the diagonal (number of elements).
k: int
        Offset of the off-diagonal. k=0 is the main diagonal, k>0 selects a
        diagonal in the upper part of the matrix, and k<0 one in the lower
        part.
init: int
The starting element of the diagonal.
step: int
The step between elements in the diagonal.
Returns
-------
indices: tuple of arr
        Indices of the diagonal. The first element of the tuple contains the
        row indices, and the second one the column indices.
"""
assert np.isscalar(k), 'The offset k must be a scalar'
if k==0:
indices=(np.arange(init,N,step=step),np.arange(init,N,step=step))
elif k>0:
indices=(np.arange(init,N-k,step=step),np.arange(init,N-k,step=step)+k)
elif k<0:
indices=(np.arange(init,N+k,step=step)-k,np.arange(init,N+k,step=step))
return(indices)
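# Example (illustrative): indices of the first upper off-diagonal of a 4x4
# matrix.
#
#     >>> diagonal(4, k=1)
#     (array([0, 1, 2]), array([1, 2, 3]))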
#%%
def concatenate(arg):
"""
Concatenate a list of arrays.
Parameters
----------
arg: tuple or list of arr
List of arrays to be concatenated.
Returns
-------
    con: arr or tuple
        The concatenated array, or a tuple of two concatenated index arrays
        when the inputs are (row, column) index tuples.
"""
if isinstance(arg[0],tuple) and len(arg[0])==2:
index_1, index_2 = np.array([]), np.array([])
for i in range(len(arg)):
index_1 = np.append(index_1,arg[i][0])
index_2 = np.append(index_2,arg[i][1])
indices=(index_1,index_2)
else:
indices=np.concatenate(arg)
return(indices)
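# Example (illustrative):
#
#     >>> concatenate((np.array([0, 1]), np.array([2, 3])))
#     array([0, 1, 2, 3])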
#%%
def between(arg, interval):
"""
Computes whether a given number is between a given interval or not.
Parameters
----------
arg: scalar
Number to be evaluated.
interval: tuple
Interval in which perform the evaluation.
Returns
-------
result: bool
If arg is between interval, result=True, and result=False in other
case.
"""
if arg>=interval[0] and arg<=interval[1]:
result=True
else:
result=False
return(result)
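# Example (illustrative):
#
#     >>> between(0.5, (0, 1))
#     True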
#%%
def arg_isclose(vec,val):
"""
    Find the index of the element of the array "vec" which is closest to a
    specific value "val".
Parameters
----------
vec: arr
Array in which it is desired to find the closest element.
val: scalar
Closest value.
Returns
-------
result: int
Index of the element of vec closest to val.
"""
arg=np.argmin(np.abs(vec-val))
return(arg)
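# Example (illustrative): the element of [0.0, 1.0, 2.0] closest to 1.2 is
# at index 1.
#
#     >>> arg_isclose(np.array([0.0, 1.0, 2.0]), 1.2)
#     1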
#%% ############################# Constructors or extractors
#%%
def build_mesh(N,L,mesh_type='regular',fact=0.5,asym=1):
"""
Build a 2D inhomogeneous rectangular mesh.
Parameters
----------
N: arr
Number of sites in each direction.
L: arr
Length en each direction.
mesh_type: str
Whether to build a 'regular' mesh, or an inhomogeneous one with a
discretization given by a 'geometric' distribution, an 'exponential'
separation, or a 'random' one.
fact: scalar
Factor which regulates the separations between sites.
asym: scalar
The asymmetry between the factors applied for the x and y direction.
Returns
-------
x, y: mesh
Mesh in the x and y directions.
dis: mesh
Mesh with the discretization in each point.
"""
if mesh_type=='regular':
        x, y = np.linspace(-L[0]/2,L[0]/2,N[0]), np.linspace(-L[1]/2,L[1]/2,N[1])
dis=np.array([np.abs(x[1]-x[0]),np.abs(y[1]-y[0])])
x,y=np.meshgrid(x,y,indexing='ij')
return (x,y,dis)
elif mesh_type=='geometric':
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xm[i,j]=(L[0]/2*fact**np.abs(i-int((N[0]-1)/2))-L[0]/2)*np.sign(i-int((N[0]-1)/2))*(L[0]/(L[0]/2*fact**np.abs(0-int((N[0]-1)/2))-L[0]/2)/2)
ym[i,j]=(L[1]/2*fact**np.abs(j-int((N[1]-1)/2))-L[1]/2)*np.sign(j-int((N[1]-1)/2))*(L[1]/(L[1]/2*fact**np.abs(0-int((N[1]-1)/2))-L[1]/2)/2)
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
elif mesh_type=='exponential':
np.seterr(all='ignore')
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xm[i,j]=(1-np.exp(-np.abs(i-int((N[0]-1)/2))*fact))*np.sign(i-int((N[0]-1)/2))*(1-np.exp(-np.abs(N[0]-int((N[0]-1)/2))*fact))**(-1)*L[0]/2
ym[i,j]=(1-np.exp(-np.abs(j-int((N[1]-1)/2))*fact/asym))*np.sign(j-int((N[1]-1)/2))*(1-np.exp(-np.abs(N[1]-int((N[1]-1)/2))*fact/asym))**(-1)*L[1]/2
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
elif mesh_type=='random':
x,y,dis=build_mesh(N,L,mesh_type='regular')
xm,ym=np.zeros(N), np.zeros(N)
dis_m=np.array([np.zeros(N),np.zeros(N)])
for i in range(N[0]):
for j in range(N[1]):
xp, yp = x[:,0]+(np.random.rand(N[0])-0.5)*dis[0]*fact, y[0,:]+(np.random.rand(N[0])-0.5)*dis[1]*fact
xm[i,j],ym[i,j]=xp[i],yp[j]
for i in range(N[0]):
for j in range(N[1]):
if not(j==0 or j==N[1]-1):
dis_m[1,i,j]=np.abs(ym[i,j+1]-ym[i,j])/2+np.abs(ym[i,j-1]-ym[i,j])/2
if not(i==0 or i==N[0]-1):
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])/2+np.abs(xm[i,j]-xm[i+1,j])/2
if i==0:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i+1,j])
elif i==N[0]-1:
dis_m[0,i,j]=np.abs(xm[i,j]-xm[i-1,j])
if j==0:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j+1])
elif j==N[1]-1:
dis_m[1,i,j]=np.abs(ym[i,j]-ym[i,j-1])
return (xm,ym,dis_m)
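# Usage sketch (illustrative): a regular 101x101 mesh over a 100x100 domain.
#
#     x, y, dis = build_mesh(np.array([101, 101]), np.array([100., 100.]))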
#%%
def get_potential(phi_in,x,y,z,symmetry='none',mesh_type='none'):
"""
    Obtain the potential from a function at given sites.
Parameters
----------
phi_in: fun
Fenics function of the electrostatic potential.
x,y,z: arr
        Points at which to evaluate the potential.
    symmetry: {'none','x','y','xy','yz','full-shell'}
Imposed symmetry of the potential.
    mesh_type: {'none','yz-mesh'}
        Whether y and z are given as 1D arrays ('none') or as 2D meshgrids
        ('yz-mesh').
Returns
-------
phi_out: arr
Electrostatic potential in the sites given by x,y,z.
"""
phi_out=np.zeros((len(x),len(y),len(z)))
if symmetry=='none':
for i in range(len(x)):
for j in range(len(y)):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
elif symmetry=='y':
if mesh_type=='none':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
elif mesh_type=='yz-mesh':
for i in range(len(x)):
for j in range(int((len(y[:,0])-1)/2)+1):
for k in range(len(z[0,:])):
phi_out[i,j,k]=phi_in(x[i],y[j,k],z[j,k])
phi_out[i,len(y[:,0])-j-1,k]=phi_out[i,j,k]
elif symmetry=='yz':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(int((len(z)-1)/2)+1):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
phi_out[i,j,len(z)-k-1]=phi_out[i,j,k]
phi_out[i,len(y)-j-1,len(z)-k-1]=phi_out[i,j,k]
elif symmetry=='xy':
for i in range(int((len(x)-1)/2)+1):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
phi_out[len(x)-i-1,j,k]=phi_out[i,j,k]
phi_out[len(x)-i-1,len(y)-j-1,k]=phi_out[i,j,k]
elif symmetry=='x':
for i in range(int((len(x)-1)/2)+1):
for j in range(len(y)):
for k in range(len(z)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[len(x)-i-1,j,k]=phi_out[i,j,k]
elif symmetry=='full-shell':
for i in range(len(x)):
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if (z[k]>=0) and (y[j]<=z[k]/np.tan(np.pi/3)) and (y[j]>=-z[k]/np.tan(np.pi/3)):
phi_out[i,j,k]=phi_in(x[i],y[j],z[k])
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
for l in range(1,4):
phi_out[i,int(round((j-25)*np.cos(np.pi/3*l)-(k-25)*np.sin(np.pi/3*l)))+25,int(round((j-25)*np.sin(np.pi/3*l)+(k-25)*np.cos(np.pi/3*l)))+25]=phi_out[i,j,k]
phi_out[i,int(round((len(y)-j-1-25)*np.cos(np.pi/3*l)-(k-25)*np.sin(np.pi/3*l)))+25,int(round((len(y)-j-1-25)*np.sin(np.pi/3*l)+(k-25)*np.cos(np.pi/3*l)))+25]=phi_out[i,j,k]
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if phi_out[i,j,k]==0:
phi_out[i,j,k]=phi_out[i,int(j+1),k]
for j in range(int((len(y)-1)/2)+1):
for k in range(len(z)):
if phi_out[i,j,k]==0:
phi_out[i,j,k]=phi_out[i,int(j+2),k]
phi_out[i,len(y)-j-1,k]=phi_out[i,j,k]
return (phi_out)
#%%
def get_ElectricField(phi,x,y,z):
"""
Obtain the electric field of a given electrostatic potential.
Parameters
----------
phi: arr
Electrostatic potential.
x,y,z: arr
            Points at which the potential is evaluated.
Returns
-------
E: arr
Electric field of phi. Each element E[i] is the electric field in
each direction.
"""
dis=np.array([np.abs(x[1]-x[0]),np.abs(y[1]-y[0]),np.abs(z[1]-z[0])])
if np.ndim(phi)==3:
Ex, Ey, Ez = np.gradient(phi,dis[0],dis[1],dis[2])
return (np.array([Ex,Ey,Ez]))
elif np.ndim(phi)==2:
Ey, Ez = np.gradient(phi,dis[1],dis[2])
return (np.array([Ey,Ez]))
elif np.ndim(phi)==1:
Ex = np.gradient(phi,dis)
return (Ex)
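# Example (sketch, not from the original source): for a linear potential the
# numerical field is uniform. Note that this function returns np.gradient(phi)
# directly, so phi = -E0*x gives E[0] = -E0 under this sign convention.
# >>> x = y = z = np.linspace(0, 10, 11)
# >>> phi = -2.0 * x[:, None, None] * np.ones((11, 11, 11))
# >>> E = get_ElectricField(phi, x, y, z)
# >>> bool(np.allclose(E[0], -2.0))
# True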
#%% ############################# Modifiers
#%%
def mask_hexagonal(fun_in,y,z,x=0,change=np.nan,mesh_type='regular'):
"""
    Hexagonal mask. This function changes the values of those points of fun_in
    which lie outside the hexagonal section.
Parameters
----------
fun_in: arr
Function to be masked.
        y,z: arr
            Points of the section at which the function is evaluated.
        x: arr
            Points along the wire length at which the function is evaluated.
            If x=0, the function is only evaluated in 2D.
        change: value
            Value assigned to those points outside of the hexagonal section.
            If change='masked', a masked array is returned.
        mesh_type: str
            If 'regular' (default), y and z are 1D arrays of a regular grid;
            for any other value they are taken as 2D mesh arrays.
Returns
-------
fun_out: arr
Masked function.
"""
if np.isscalar(x):
if mesh_type=='regular':
Ny, Nz = len(y), len(z)
Ly, Lz = y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((len(y),len(z)))
for j in range(Ny):
for k in range(Nz):
if not(between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[j,k]=change
else:
fun_out[j,k]=fun_in[j,k]
else:
Ny, Nz = len(y[:,0]), len(z[0,:])
Ly, Lz = y[Ny-1,0]*2, z[0,Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((Ny,Nz))
for j in range(Ny):
for k in range(Nz):
if not(between(z[j,k], (-b0,b0)) and between(z[j,k],(2*b0/a0*y[j,k]-2*b0,-2*b0/a0*y[j,k]+2*b0)) and between(z[j,k],(-2*b0/a0*y[j,k]-2*b0,2*b0/a0*y[j,k]+2*b0))):
fun_out[j,k]=change
else:
fun_out[j,k]=fun_in[j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
else:
Ny, Nz = len(y), len(z)
Ly, Lz = y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
fun_out=np.zeros((len(x),len(y),len(z)))
for j in range(Ny):
for k in range(Nz):
if not(between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[:,j,k]=np.ones(len(x))*change
else:
fun_out[:,j,k]=fun_in[:,j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
return (fun_out)
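# Example (sketch): mask a constant 2D function over a hexagonal section. The
# grid follows the convention Ly = 2*y[-1] used above.
# >>> y = z = np.linspace(-50, 50, 101)
# >>> f_hex = mask_hexagonal(np.ones((101, 101)), y, z, change=np.nan)
# >>> bool(np.isnan(f_hex[0, 0]))    # grid corners lie outside the hexagon
# True
# >>> bool(np.isnan(f_hex[50, 50]))  # the center is inside
# False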
#%%
def mask_wire(fun_in,N,dis,change=np.nan,include=np.array(['wire']),W_w=0,W_l1=0,W_l2=0,faces_l1=np.array([]),faces_l2=np.array([])):
"""
    Mask for wires. This function changes the values of those points of fun_in
    which lie outside the hexagonal section and/or the layers surrounding the wire.
Parameters
----------
fun_in: arr
Function to be masked.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
change: value
Value to which change those points outside of the hexagonal section.
        include: arr
            Whether to include the wire ('wire') and/or some layers ('layer_1'
            and/or 'layer_2').
W_w: float
Width of the nanowire. If W_w=0, then the width is taken as N*dis.
W_l1: float
Width of the first layer surrounding the wire. W_l1=0 means that
there is no layer.
        W_l2: float
            Width of the second layer surrounding the wire. W_l2=0 means that
            there is no second layer.
faces_l1: arr
Facets that the first layer covers to the wire. Each facet is
labeled with a number from 1 to 6 (the upper one is 1, and the rest
are numbered clockwise). Each element of the array denotes with a
string (e.g. np.array(['1','2'])) if such facet is covered.
faces_l2: arr
Same for the second layer.
Returns
-------
fun_out: arr
Masked function.
"""
if len(N)==3:
Nx, Ny, Nz = N
dis_x, dis_y, dis_z = dis
fun_in=fun_in[0]
elif len(N)==2:
Ny, Nz = N
dis_y, dis_z = dis
y, z= np.linspace(-(Ny-1)*dis_y/2,(Ny-1)*dis_y/2,Ny), np.linspace(-(Nz-1)*dis_z/2,(Nz-1)*dis_z/2,Nz)
if np.isscalar(W_w):
if W_w==0:
W_w=Ny*dis_y
a0=W_w/2
b0=a0*np.sin(np.pi/3)
elif not(np.isscalar(W_w)):
a0=Ny*dis_y/2
b0=Nz*dis_z/2*np.sin(np.pi/3)
if faces_l1.size==0:
faces_l1=np.array(['0'])
if faces_l2.size==0:
faces_l2=np.array(['0'])
fun_out=np.zeros((Ny,Nz))
for j in range(Ny):
for k in range(Nz):
if (include=='wire').any():
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
fun_out[j,k]=fun_in[j,k]
else:
fun_out[j,k]=change
if (include=='layer_1').any():
if (faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0,b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0,2*b0/a0*y[j]+W_l1)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0,-2*b0/a0*y[j]+W_l1)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l1,-2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l1,2*b0/a0*y[j]+2*b0+W_l1)))):
fun_out[j,k]=fun_in[j,k]
elif (faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l1/2,a0/2+W_l1/2)) and between(z[k], (-b0-W_l1,-b0)))):
fun_out[j,k]=fun_in[j,k]
if (include=='layer_2').any():
if (faces_l2=='1').any():
if (faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0+W_l1,b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='1').any() and ((between(y[j], (-a0/2,a0/2)) and between(z[k], (b0,b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='2').any():
if (faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0+W_l1,2*b0/a0*y[j]+W_l1+W_l2)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='2').any() and ((between(z[k], (-2*b0/a0*y[j]+2*b0,2*b0/a0*y[j]+W_l2)) and between(z[k], (2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='6').any():
if (faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0+W_l1,-2*b0/a0*y[j]+W_l1+W_l2)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='6').any() and ((between(z[k], (2*b0/a0*y[j]+2*b0,-2*b0/a0*y[j]+W_l2)) and between(z[k], (-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='3').any():
if (faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0-W_l1)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l1-W_l2,-2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='3').any() and ((between(z[k], (-b0,2*b0/a0*y[j]-2*b0)) and between(z[k], (2*b0/a0*y[j]-2*b0-W_l2,-2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='5').any():
if (faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0-W_l1)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l1-W_l2,2*b0/a0*y[j]+2*b0+W_l1+W_l2)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='5').any() and ((between(z[k], (-b0,-2*b0/a0*y[j]-2*b0)) and between(z[k], (-2*b0/a0*y[j]-2*b0-W_l2,2*b0/a0*y[j]+2*b0+W_l2)))):
fun_out[j,k]=fun_in[j,k]
if (faces_l2=='4').any():
if (faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l1/2-W_l2/2,a0/2+W_l1/2+W_l2/2)) and between(z[k], (-b0-W_l1-W_l2,-b0)))):
fun_out[j,k]=fun_in[j,k]
elif not(faces_l1=='4').any() and ((between(y[j], (-a0/2-W_l2/2,a0/2+W_l2/2)) and between(z[k], (-b0-W_l2,-b0)))):
fun_out[j,k]=fun_in[j,k]
if change=='masked':
fun_out=np.ma.array(fun_out, mask=np.isnan(fun_out))
if len(N)==3:
fun_out=np.tile(fun_out,(Nx,1,1))
return (fun_out)
#%%
def interface(N,dis,width,faces,a0,b0):
"""
    Find points close to some nanowire facet (assuming a nanowire with
    hexagonal cross-section).
Parameters
----------
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
        width: float
Width of the "close region" to the facet.
        faces: arr
            Which facets to include in the search. Each facet is labeled with
            a number from 1 to 6 (the upper one is 1, and the rest are
            numbered clockwise). Each element of the array is a string (e.g.
            np.array(['1','2'])) selecting a facet.
        a0: float
            Half of the width (along y) of the hexagonal section.
        b0: float
            Half of the height (along z) of the hexagonal section.
Returns
-------
        sites: arr
            Array with value 1 on the sites close to the selected facets and
            0 elsewhere.
"""
L=np.array([(N[0]-1)*dis[0], (N[1]-1)*dis[1], (N[2]-1)*dis[2]])
x, y =np.linspace(-L[1]/2,L[1]/2,N[1]), np.linspace(-L[2]/2,L[2]/2,N[2])
fun_out=np.zeros(N[1::],dtype=int)
for i in range(N[1]):
for j in range(N[2]):
if (faces=='1').any() and ((between(x[i], (-a0/2,a0/2)) and between(y[j], (b0-width,b0)) and between(y[j], (-2*b0/a0*x[i],b0))and between(y[j], (2*b0/a0*x[i],b0)))):
fun_out[i,j]=1
elif (faces=='6').any() and ((between(y[j], (-2*b0/a0*x[i]+2*b0-width*b0/a0*2,2*b0/a0*x[i])) and between(y[j], (2*b0/a0*x[i]-2*b0-width,-2*b0/a0*x[i]+2*b0)) and between(y[j], (0,b0)) )):
fun_out[i,j]=1
elif (faces=='2').any() and ((between(y[j], (2*b0/a0*x[i]+2*b0-width*b0/a0*2,-2*b0/a0*x[i])) and between(y[j], (-2*b0/a0*x[i]-2*b0-width,2*b0/a0*x[i]+2*b0)) and between(y[j], (0,b0)) )):
fun_out[i,j]=1
elif (faces=='5').any() and ((between(y[j], (-b0,2*b0/a0*x[i]-2*b0+width*b0/a0*2)) and between(y[j], (2*b0/a0*x[i]-2*b0,-2*b0/a0*x[i]+2*b0+width)) and between(y[j], (-b0,0)) )):
fun_out[i,j]=1
elif (faces=='3').any() and ((between(y[j], (-b0,-2*b0/a0*x[i]-2*b0+width*b0/a0*2)) and between(y[j], (-2*b0/a0*x[i]-2*b0,2*b0/a0*x[i]+2*b0+width)) and between(y[j], (-b0,0)) )):
fun_out[i,j]=1
elif (faces=='4').any() and ((between(x[i], (-a0/2,a0/2)) and between(y[j], (-b0,-b0+width)))):
fun_out[i,j]=1
fun_out_end=np.zeros(N)
for i in range(N[0]):
fun_out_end[i,:,:]=fun_out
return fun_out_end
#%%
def H_rectangular2hexagonal(H,N,dis,BdG='no',output='H',m=0,sparse='yes'):
"""
    Transform a Hamiltonian of a nanowire with rectangular cross-section into
    one with a hexagonal cross-section.
Parameters
----------
H: arr
Hamiltonian with rectangular section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
        m: int
            Number of sites of the discretized Hamiltonian with the hexagonal
            section. If m=0, it is computed internally.
output: str
Whether to return the Hamiltonian (output='H'), the number of sites
of the discretized Hamiltonian with the hexagonal section
(output='m_hex'), or the sites that are inside of the nanowire
section (output='sites').
Returns
-------
Depends on the parameter output.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
Nx, Ny, Nz = N[0], N[1], N[2]
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
l=0
if (output=='H'):
if m==0:
m=H_rectangular2hexagonal(H,N,dis,BdG=BdG,output='m_hex',m=0)
if BdG=='no':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,2*Nx*Ny*Nz),dtype=complex)
else:
H_del=np.zeros((m,2*Nx*Ny*Nz),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
H_del[l,2*(k+(j+i*Ny)*Nz)]=1
H_del[l+1,2*(k+(j+i*Ny)*Nz)+1]=1
l=l+2
elif BdG=='yes':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,4*Nx*Ny*Nz),dtype=complex)
else:
H_del=np.zeros((m,4*Nx*Ny*Nz),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
H_del[l,2*(k+(j+i*Ny)*Nz)]=1
H_del[l+1,2*(k+(j+i*Ny)*Nz)+1]=1
H_del[l+int(m/2),2*(k+(j+i*Ny)*Nz)+int(2*Nx*Ny*Nz)]=1
H_del[l+1+int(m/2),2*(k+(j+i*Ny)*Nz)+1+int(2*Nx*Ny*Nz)]=1
l=l+2
H=H_del.dot(H.dot(H_del.transpose()))
return (H)
elif (output=='m_hex'):
m=0
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
m=m+1
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
return (m)
elif (output=='sites'):
m=0
sites=np.array([])
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
if (between(z[k], (b0-dis[2],b0))):
sites=np.append(sites,m)
m=m+2
return (sites)
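# Example (sketch): reduce a rectangular-section Hamiltonian to the sites of
# the hexagonal section. A sparse identity stands in for a real Hamiltonian
# of the expected dimension 2*Nx*Ny*Nz (spinful, no BdG).
# >>> N, dis = np.array([1, 11, 11]), np.array([0, 5, 5])
# >>> H = scipy.sparse.identity(2*np.prod(N), dtype=complex, format='dok')
# >>> m = H_rectangular2hexagonal(H, N, dis, BdG='no', output='m_hex')
# >>> H_hex = H_rectangular2hexagonal(H, N, dis, BdG='no', m=m)
# >>> H_hex.shape == (m, m)
# True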
#%%
def U_rectangular2hexagonal(U_in,N,dis,BdG='no',m=0):
"""
    Transform a wavefunction of a nanowire with rectangular cross-section into
    one with a hexagonal cross-section, erasing to this end the elements of
    the wavefunction outside the hexagonal section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with rectangular section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
m: int
Number of sites of the hexagonal cross-section nanowire. It can be
computed using the function Function.H_rectangular2hexagonal.
Returns
-------
U: arr
Wavefunction of a nanowire with hexagonal section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if scipy.sparse.issparse(U_in):
U_in=U_in.todense()
Nx, Ny, Nz = N[0], N[1], N[2]
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
n_eig=np.shape(U_in)[1]
l=0
if BdG=='no':
U=np.zeros((m,n_eig),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U[l,:], U[l+1,:] = U_in[2*(k+(j+i*Ny)*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1,:]
l=l+2
elif BdG=='yes':
U=np.zeros((m,n_eig),dtype=complex)
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U[l,:], U[l+1,:] = U_in[2*(k+(j+i*Ny)*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1,:]
U[l+int(m/2),:], U[l+1+int(m/2),:] = U_in[2*(k+(j+i*Ny)*Nz)+int(2*Nx*Ny*Nz),:], U_in[2*(k+(j+i*Ny)*Nz)+1+int(2*Nx*Ny*Nz),:]
l=l+2
U=scipy.sparse.dok_matrix(U)
return (U)
#%%
def U_hexagonal2rectangular(U_in,N,dis,BdG='no',space='position'):
"""
    Transform a wavefunction of a nanowire with hexagonal cross-section into
    one with a rectangular cross-section, filling with zeros the new elements
    outside the hexagonal section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with hexagonal section.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
space: str
Whether the wavefunction is in position space or momentum.
Returns
-------
U: arr
Wavefunction of a nanowire with rectangular section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if space=='momentum':
Nx, Ny, Nz = N[0], N[1], N[2]
m=len(U_in[:,0,0])
n_eig=len(U_in[0,:,0])
n_k=len(U_in[0,0,:])
if BdG=='no':
U_out = np.empty([2*Nx*Ny*Nz,int(n_eig),n_k],dtype=complex)
elif BdG=='yes':
U_out = np.empty([4*Nx*Ny*Nz,int(n_eig),n_k],dtype=complex)
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
l=0
if BdG=='no':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U_out[2*(k+(j+i*Ny)*Nz),:,:]=U_in[l,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=U_in[l+1,:,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=np.zeros((n_eig,n_k))
elif BdG=='yes':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0)) ):
U_out[2*(k+(j+i*Ny)*Nz),:,:]=U_in[l,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=U_in[l+1,:,:]
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:,:]=U_in[l+int(m/2),:,:]
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:,:]=U_in[l+1+int(m/2),:,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1,:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:,:]=np.zeros((n_eig,n_k))
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:,:]=np.zeros((n_eig,n_k))
elif space=='position':
Nx, Ny, Nz = N[0], N[1], N[2]
m=len(U_in[:,0])
n_eig=len(U_in[0,:])
if BdG=='no':
U_out = np.empty([2*Nx*Ny*Nz,int(n_eig)],dtype=complex)
elif BdG=='yes':
U_out = np.empty([4*Nx*Ny*Nz,int(n_eig)],dtype=complex)
Ly, Lz = dis[1]*Ny, dis[2]*Nz
y, z = np.linspace(-float(Ly)/2,float(Ly)/2,Ny), np.linspace(-float(Lz)/2,float(Lz)/2,Nz)
a0=float(Ly)/2
b0=a0*np.sin(np.pi/3)*(Lz/Ly)
if scipy.sparse.issparse(U_in):
U_in=U_in.todense()
l=0
if BdG=='no':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
U_out[2*(k+(j+i*Ny)*Nz),:]=U_in[l,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=U_in[l+1,:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=np.zeros((n_eig))
elif BdG=='yes':
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
if (between(z[k], (-b0,b0)) and between(z[k],(2*b0/a0*y[j]-2*b0,-2*b0/a0*y[j]+2*b0)) and between(z[k],(-2*b0/a0*y[j]-2*b0,2*b0/a0*y[j]+2*b0))):
U_out[2*(k+(j+i*Ny)*Nz),:]=U_in[l,:]
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=U_in[l+1,:]
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:]=U_in[l+int(m/2),:]
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:]=U_in[l+1+int(m/2),:]
l=l+2
else:
U_out[2*(k+(j+i*Ny)*Nz),:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1,:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+2*Nx*Ny*Nz,:]=np.zeros((n_eig))
U_out[2*(k+(j+i*Ny)*Nz)+1+2*Nx*Ny*Nz,:]=np.zeros((n_eig))
return (U_out)
#%%
def H_rec2shape(H,shape,N,dis,BdG='no',output='H',m=0):
"""
    Transform a Hamiltonian of a nanowire with rectangular cross-section into
    one with a different cross-section.
Parameters
----------
H: arr
Hamiltonian with rectangular section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each 0 element means that the corresponding site is not
part of the section, while 1 means it is; or it can be 'hexagonal',
            which means that the section is hexagonal.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: {'yes','no'}
Whether the Hamiltonian has BdG symmetry.
output: {'H','m'}
Either to return the Hamiltonian (output='H') or the number of sites
of the discretized Hamiltonian with the desired shape
(output='m').
m: int
Number of sites of the discretized Hamiltonian with the desired
shape. If m=0, m is computed.
Returns
-------
Depends on the parameter output.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
if m==0:
m=len(shape[shape==1])
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
if scipy.sparse.issparse(H):
sparse='yes'
else:
sparse='no'
if (output=='H'):
if BdG=='no':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,2*np.prod(N)),dtype=complex)
else:
H_del=np.zeros((m,2*np.prod(N)),dtype=complex)
elif BdG=='yes':
if sparse=='yes':
H_del=scipy.sparse.dok_matrix((m,4*np.prod(N)),dtype=complex)
else:
H_del=np.zeros((m,4*np.prod(N)),dtype=complex)
j=0
for i in range(np.prod(N)):
if shape[i]==1:
H_del[j,2*i],H_del[j+1,2*i+1] = 1, 1
if BdG=='yes':
H_del[j+int(m/2),2*i+2*int(np.prod(N))],H_del[j+1+int(m/2),2*i+1+2*int(np.prod(N))] = 1, 1
j+=2
H=H_del.dot(H.dot(H_del.transpose()))
return (H)
elif (output=='m'):
return (m)
#%%
def U_rec2shape(U_in,shape,N,dis,BdG='no',m=0):
"""
    Transform a wavefunction of a nanowire with rectangular cross-section into
    one with a different cross-section, erasing to this end the elements of
    the wavefunction outside the section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with rectangular section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each np.nan element means that the corresponding site is not
            part of the section; or it can be 'hexagonal', which means that
            the section is hexagonal.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
m: int
Number of sites of the discretized Hamiltonian with the desired
shape. If m=0, m is computed.
Returns
-------
U: arr
Wavefunction of a nanowire with hexagonal section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
n_eig=np.shape(U_in)[1]
if m==0:
m=len(shape[shape==1])
if BdG=='no':
m=m*2
elif BdG=='yes':
m=m*4
if scipy.sparse.issparse(U_in):
sparse='yes'
U_in=U_in.todense()
else:
sparse='no'
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
shape=np.repeat(shape,2)
if BdG=='yes':
shape=np.tile(shape,2)
U=np.zeros((m,n_eig),dtype=complex)
U=U_in[shape==1,:]
if sparse=='yes':
U=scipy.sparse.dok_matrix(U)
return (U)
#%%
def U_shape2rec(U_in,shape,N,dis,BdG='no'):
"""
    Transform a wavefunction of a nanowire with an arbitrary cross-section into
    one with a rectangular cross-section, filling with zeros the new elements
    outside the original section of the wire.
Parameters
----------
U_in: arr
Wavefunction of a nanowire with hexagonal section.
shape: arr or str
Shape of the section. It can be a (Nx,Ny,Nz) or (Ny,Nz) array,
where each np.nan element means that the corresponding site is not
            part of the section; or it can be 'hexagonal', which means that
            the section is hexagonal.
N: arr
Number of sites in each direction.
dis: arr
Discretization in each direction.
BdG: str
Whether the Hamiltonian has BdG symmetry.
Returns
-------
U: arr
Wavefunction of a nanowire with rectangular section.
"""
if len(N)==2:
N=np.array([1,N[0],N[1]])
dis=np.array([0,dis[0],dis[1]])
n_eig=len(U_in[0,:])
if np.isscalar(shape) and shape=='hexagonal':
shape=np.ones(N)
shape=mask_hexagonal(shape,np.linspace(-N[1]*dis[1]/2,N[1]*dis[1]/2,N[1]),np.linspace(-N[2]*dis[2]/2,N[2]*dis[2]/2,N[2]),x=np.linspace(0,N[0]*dis[0],N[0]),change=0)
shape=shape.flatten()
shape=np.repeat(shape,2)
if BdG=='yes':
shape=np.tile(shape,2)
if scipy.sparse.issparse(U_in):
sparse='yes'
U_in=U_in.todense()
else:
sparse='no'
if BdG=='no':
U_out = np.zeros((2*np.prod(N),int(n_eig)),dtype=complex)
elif BdG=='yes':
U_out = np.zeros((4*np.prod(N),int(n_eig)),dtype=complex)
U_out[shape==1,:]=U_in
if sparse=='yes':
U_out=scipy.sparse.dok_matrix(U_out)
return (U_out)
#%% ############################# Spectrum
#%%
def prob(U,N,BdG='yes'):
"""
Obtains the probability density of a given wavefunction.
Parameters
----------
U: arr
Wavefunction in a 1D array.
N: int or arr
            Number of sites. Each element N[i] is the number of sites along
            direction i. If N is an int, there is just one dimension.
BdG: {'yes','no'}
Whether the wavefunction U is written in the BdG formalism.
Returns
-------
P: arr
            Probability density of U with the same dimensions as N.
"""
P=np.zeros(N)
if BdG=='no':
P=(np.abs(U[0::2])**2+np.abs(U[1::2])**2).reshape(N)
elif BdG=='yes':
P=(np.abs(U[0:2*np.prod(N):2])**2+np.abs(U[1:2*np.prod(N):2])**2+np.abs(U[2*np.prod(N)::2])**2+np.abs(U[2*np.prod(N)+1::2])**2).reshape(N)
return (P)
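# Example (sketch): probability density of a normalized spinful state on two
# sites, without BdG structure.
# >>> U = np.array([1, 1j, 0, 0]) / np.sqrt(2)
# >>> prob(U, 2, BdG='no')
# array([1., 0.])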
#%%
def Qtot(E,U,kT):
"""
Computes the total charge in the system.
Parameters
----------
E: scalar or arr
Energies.
U: arr
Eigenstates corresponding to each energy.
kT: scalar
Temperature (in units of energy).
Returns
-------
Qtot: scalar
Total charge in the system.
"""
den=np.dot(U,np.dot(np.diag(1/(1+np.exp(E/kT))),np.transpose(U)))
Qtot=np.sum(np.diag(den)[0:int(len(E)/2)])
return Qtot
#%%
def QM(Uodd,Ueven):
"""
Computes the Majorana charge (wavefunction overlap).
Parameters
----------
Uodd: arr
Eigenstate of the odd-parity Majorana state.
        Ueven: arr
Eigenstate of the even-parity Majorana state.
Returns
-------
QM: scalar
Majorana charge (overlap between U_L and U_R).
"""
QM = np.absolute(np.dot(Uodd+Ueven, -1j*(Uodd-Ueven)))
return QM
#%%
def Density_Matrix(E,U,kT):
"""
Computes the density matrix of the system.
Parameters
----------
E: scalar or arr
Energies.
U: arr
Eigenstates corresponding to each energy.
kT: scalar
Temperature (in units of energy).
Returns
-------
den: arr
Density matrix of the system.
"""
den = np.dot(U, np.dot(np.diag(1 / (1 + np.exp(E / kT))), np.transpose(U)))
return den
#%%
def Density(E,U,N,kT):
"""
Computes the charge density of the system.
Parameters
----------
E: arr
Energies.
U: arr
Eigenstates.
N: arr
Number of sites in each direction.
kT: scalar
Temperature (in units of energy).
Returns
-------
        den: arr (3D)
            Charge density at each site.
"""
np.seterr(over='ignore')
if np.ndim(N)==1:
Nx=N[0]
Ny=N[1]
Nz=N[2]
n_eig=len(E)
den=np.zeros((Nx,Ny,Nz))
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
for m in range(n_eig):
den[i,j,k]=den[i,j,k]+(np.abs(U[2*(k+(i*Ny+j)*Nz),m])**2+np.abs(U[2*(k+(i*Ny+j)*Nz)+1,m])**2)*(1 / (1 + np.exp(E[m] / kT)))
#den = np.dot(U, np.transpose(U))
elif np.ndim(N)==0:
Nx=N
n_eig=len(E)
den=np.zeros((Nx))
for i in range(Nx):
for m in range(n_eig):
den[i]=den[i]+(np.abs(U[2*i,m])**2+np.abs(U[2*i+1,m])**2)*(1 / (1 + np.exp(E[m] / kT)))
#den = np.dot(U, np.transpose(U))
return den
#%%
def Density_momentum(E,U,k,N,kT):
"""
    Charge density of a system infinite in one direction.
Parameters
----------
E: arr
Energies.
U: arr
Eigenstates.
k: arr
Momentum vector.
N: arr
Number of sites in each direction.
kT: scalar
Temperature (in units of energy).
Returns
-------
den: arr (2D)
Charge density in each site.
"""
Nx=N[0]
Ny=N[1]
Nz=N[2]
n_eig=len(E)
if np.ndim(U)==3:
den=np.zeros((Nx,Ny,Nz))
for i_x in range(Nx):
for i_y in range(Ny):
for i_z in range(Nz):
for i_E in range(n_eig):
den[i_x,i_y,i_z]=den[i_x,i_y,i_z]+(np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz)),i_E,0])**2+np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz))+1,i_E,0])**2)*denfromDOS(k,E[i_E,:],kT)
elif np.ndim(U)==2:
Nx=1
den=np.zeros((Ny,Nz))
i_x=0
for i_y in range(Ny):
for i_z in range(Nz):
for i_E in range(n_eig):
den[i_y,i_z]=den[i_y,i_z]+(np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz)),i_E])**2+np.abs(U[int(2*(i_z+(i_y+i_x*Ny)*Nz))+1,i_E])**2)*denfromDOS(k,E[i_E,:],kT)
return den
#%%
def k_F(mu,aR,Vz,m_eff=0.023):
"""
Find the Fermi momentum for a 1D infinite nanowire.
Parameters
----------
mu: scalar or arr
Chemical potential.
aR: scalar or arr
Spin-orbit coupling.
Vz: scalar or arr
Zeeman splitting.
m_eff: scalar or str
Effective mass.
Returns
-------
k_F: scalar or arr
Fermi momentum.
"""
if m_eff=='InAs':
m_eff=0.023
elif m_eff=='InSb':
m_eff=0.015
m=constants.m_e*m_eff
hbar=constants.hbar
mu,aR,Vz=mu*1e-3*constants.e,aR*1e-12*constants.e,Vz*1e-3*constants.e
kSO=m*aR/hbar**2
kZ=np.sqrt(2*m*Vz)/hbar
kmu_p=2*m*mu/hbar**2
kF=np.zeros(2)
kF[0]=np.sqrt(2*kSO**2+kmu_p+np.sqrt(4*kSO**4+kZ**4+4*kmu_p*kSO**2))
kF[1]=np.sqrt(2*kSO**2+kmu_p-np.sqrt(4*kSO**4+kZ**4+4*kmu_p*kSO**2))
kF=kF*1e-9
return (kF)
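# Example (sketch): Fermi momenta (in nm^-1) of the two spin-orbit-split
# subbands of an InAs wire with mu=2 meV, aR=20 meV*nm and Vz=1 meV. The
# values are chosen with mu >= Vz so that both momenta are real.
# >>> kF_out, kF_in = k_F(2, 20, 1, m_eff='InAs')
# >>> bool(kF_out > kF_in > 0)
# True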
#%%
def DOS(k,E):
"""
Density of states of a 1D infinite nanowire.
Parameters
----------
k: arr
momentum vector.
E: arr
Energies.
Returns
-------
DOS: arr
Density of states.
"""
DOS=np.abs(np.gradient(E,k))**(-1)/np.pi
DOS[0]=0
return(DOS)
#%%
def denfromDOS(k,E,kT):
"""
    1D charge density of an infinite nanowire.
Parameters
----------
k: arr
momentum vector.
        E: arr
            Energies.
        kT: scalar
            Temperature (in units of energy).
    Returns
    -------
        den: scalar
            1D charge density.
"""
np.seterr(over='ignore')
dos=DOS(k,E)
den=0
for i in range(len(E)-1):
den=den+dos[i]*(E[i+1]-E[i])*(1 / (1 + np.exp(E[i] / kT)))
if not(np.abs(k[0])==np.abs(k[-1])):
den=den*2
return (den)
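# Example (sketch): density of a single 1D parabolic band E = k**2 - 0.5
# (arbitrary units) at low temperature. Only k >= 0 is sampled, so the
# function doubles the integral internally; the expected result is
# 2*k_F/pi with k_F = sqrt(0.5).
# >>> k = np.linspace(0, 1, 201)
# >>> E = k**2 - 0.5
# >>> n = denfromDOS(k, E, 1e-4)
# >>> bool(np.isclose(n, 2*np.sqrt(0.5)/np.pi, rtol=0.05))
# True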
#%%
def LDOS(P_n,E_n,E_sample,a_0=0.0):
"""
Local density of states as a function of the energies E_sample.
Parameters
----------
P_n: arr
Probability density of the wavefunction at a given point for
different eigensates.
E_n: arr
Corresponding energies.
E_sample: arr
Energies in which the LDOS is evaluated.
a_0: float
            Dirac delta characteristic width. If a_0=0 a perfect Dirac delta
            is used; otherwise a Gaussian approximation to the delta with
            characteristic width a_0 is used.
Returns
-------
LDOS: arr
            Local density of states at the given energies.
"""
n_n=len(E_n)
n_out=len(E_sample)
LDOS=np.zeros(n_out)
if a_0==0.0:
for i in range(n_out-1):
for j in range(n_n):
if (E_sample[i+1]>=E_n[j]) and (E_sample[i]<=E_n[j]):
LDOS[i]=LDOS[i]+P_n[j]
return(LDOS)
else:
if a_0=='none':
a_0=np.abs(E_sample[0]-E_sample[1])*4
def Dirac_delta(E,En,a_0):
return np.exp(-((E-En)/a_0)**2)/(np.sqrt(np.pi)*np.abs(a_0))
for i in range(n_out):
for j in range(n_n):
LDOS[i]=LDOS[i]+P_n[j]*Dirac_delta(E_sample[i],E_n[j],a_0)
return (LDOS)
#%%
def dIdV(LDOS,E_sample,kT):
"""
    Differential conductance at the given energies E_sample.
Parameters
----------
LDOS: arr
Local density of states computed using Functions.LDOS.
E_sample: arr
Energies in which the dIdV (and LDOS) is evaluated.
kT: float
Temperature (in units of energy).
Returns
-------
dIdV: arr
            Differential conductance at the given energies.
"""
def sech(x):
return 1.0/np.cosh(x)
n=len(E_sample)
dIdV=np.zeros(n)
for i in range(n):
for j in range(n):
dIdV[i]=dIdV[i]+LDOS[j]*sech((E_sample[i]-E_sample[j])/(2*kT))**2
return (dIdV)
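# Example (sketch): Gaussian-broadened LDOS of two levels at E = -0.5 and
# E = 0.5, and the corresponding thermally-broadened conductance trace. The
# conductance peak stays at the level energy.
# >>> E_sample = np.linspace(-2, 2, 401)
# >>> ldos = LDOS(np.array([1.0, 1.0]), np.array([-0.5, 0.5]), E_sample, a_0=0.05)
# >>> didv = dIdV(ldos, E_sample, 0.01)
# >>> int(np.argmax(didv[:200]))   # index of E = -0.5
# 150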
#%% ############################# Others
#%%
def Chern_number(H_k,k_vec,N):
"""
Computes the Chern number of a 1D Hamiltonian in k-space.
Parameters
----------
H_k: arr
1D Hamiltonian in k-space. Each element H_k[:,:,i] is the
Hamiltonian evaluated at k_vec[i].
k_vec: arr
Momentum vector of the first Brillouin zone in which the
Hamiltonian is evaluated.
N: int
Number of sites in which the unit cell of the Hamiltonian is
discretized.
Returns
-------
Ch: int
Chern number of the given 1D Hamiltonian.
"""
Gamma=np.zeros((4*N,4*N),dtype=complex)
for i in range(N):
Gamma[2*i:2*i+2,2*i+2*N:2*i+2*N+2]=np.array([[1,0],[0,1]])
Gamma[2*i+2*N:2*i+2*N+2,2*i:2*i+2]=np.array([[1,0],[0,1]])
Ch=np.sign(pf.pfaffian(np.dot(Gamma,H_k[:,:,int((len(k_vec)-1)/2)])))*np.sign(pf.pfaffian(np.dot(Gamma,H_k[:,:,int(len(k_vec)-1)])))
return (Ch)
#%%
def rho_acc(x,y,z,den_acc_in,n_lattice,r_lattice,superlattice_type='none'):
"""
    Computes the surface charge density of a nanowire with hexagonal
    section.
Parameters
----------
x,y,z: arr
Positions of the mesh of the nanowire section.
den_acc_in: scalar
Magnitude of the accumulation layer.
n_lattice: int
Number of superlattice cells.
r_lattice: float
            Fractional coverage of the superconductor (SC).
superlattice_type: str
Whether the superlattice is on top, at the bottom, or there is no
superlattice (none).
Returns
-------
rho_acc: arr
Charge density inside the wire due to the charge accumulation layer.
"""
Nx, Ny, Nz = len(x), len(y), len(z)
Lx, Ly, Lz = x[Nx-1], y[Ny-1]*2, z[Nz-1]*2
a0=Ly/2
b0=a0*np.sin(np.pi/3)
dis=np.array([np.abs(x[0]-x[1]),np.abs(y[0]-y[1]),np.abs(z[0]-z[1])])
L_SC, L_0=Lx/n_lattice*r_lattice, Lx/n_lattice*(1-r_lattice)
den_acc_out=np.zeros((Nx,Ny,Nz))
if superlattice_type=='top':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
elif superlattice_type=='bottom':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
elif superlattice_type=='none':
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)+1]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
den_acc_out[:,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)-1]=np.ones((Nx,arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
else:
for j in range(Nx):
for i in range(n_lattice+1):
if (x[j]>=L_SC/2+i*(L_SC+L_0)) and (x[j]<=L_SC/2+L_0+i*(L_SC+L_0)):
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,-b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
den_acc_out[j,arg_isclose(y,-a0/2):arg_isclose(y,a0/2)+1,arg_isclose(z,b0)]=np.ones((arg_isclose(y,a0/2)-arg_isclose(y,-a0/2)+1))*den_acc_in
for k in range(Nz):
if (z[k]>=-b0) and (z[k]<=0):
den_acc_out[:,arg_isclose(2*b0/a0*y-2*b0,z[k]-dis[2])+1,k]=np.ones(Nx)*den_acc_in
den_acc_out[:,arg_isclose(-2*b0/a0*y-2*b0,z[k]-dis[2])-1,k]=np.ones(Nx)*den_acc_in
elif (z[k]<=b0) and (z[k]>=0):
den_acc_out[:,arg_isclose(2*b0/a0*y+2*b0,z[k]+dis[2])-1,k]=np.ones(Nx)*den_acc_in
                den_acc_out[:,arg_isclose(-2*b0/a0*y+2*b0,z[k]+dis[2])+1,k]=np.ones(Nx)*den_acc_in
    return (den_acc_out)
"""Miscellaneous utility functions.
"""
from enum import Enum
import re
import inspect
import itertools
from scipy import ndimage as ndi
from numpydoc.docscrape import FunctionDoc
import numpy as np
import wrapt
def str_to_rgb(arg):
"""Convert an rgb string 'rgb(x,y,z)' to a list of ints [x,y,z].
"""
return list(
map(int, re.match(r'rgb\((\d+),\s*(\d+),\s*(\d+)\)', arg).groups())
)
def ensure_iterable(arg, color=False):
"""Ensure an argument is an iterable. Useful when an input argument
can either be a single value or a list. If a color is passed then it
will be treated specially to determine if it is iterable.
"""
if is_iterable(arg, color=color):
return arg
else:
return itertools.repeat(arg)
def is_iterable(arg, color=False):
"""Determine if a single argument is an iterable. If a color is being
provided and the argument is a 1-D array of length 3 or 4 then the input
is taken to not be iterable.
"""
if arg is None:
return False
elif type(arg) is str:
return False
elif np.isscalar(arg):
return False
elif color and isinstance(arg, (list, np.ndarray)):
if np.array(arg).ndim == 1 and (len(arg) == 3 or len(arg) == 4):
return False
else:
return True
else:
return True
def is_rgb(shape):
"""If last dim is 3 or 4 assume image is rgb.
"""
ndim = len(shape)
last_dim = shape[-1]
if ndim > 2 and last_dim < 5:
return True
else:
return False
def is_pyramid(data):
"""If shape of arrays along first axis is strictly decreasing.
"""
size = np.array([np.prod(d.shape) for d in data])
if len(size) > 1:
return np.all(size[:-1] > size[1:])
else:
return False
def trim_pyramid(pyramid):
"""Trim very small arrays of top of pyramid.
Parameters
----------
pyramid : list of array
Pyramid data
Returns
-------
trimmed : list of array
Trimmed pyramid data
"""
keep = [np.any(np.greater_equal(p.shape, 2 ** 6 - 1)) for p in pyramid]
if np.sum(keep) >= 2:
return [p for k, p in zip(keep, pyramid) if k]
else:
return pyramid[:2]
def should_be_pyramid(shape):
"""Check if any data axes needs to be pyramidified
Parameters
----------
shape : tuple of int
Shape of data to be tested
Returns
-------
pyr_axes : tuple of bool
True wherever an axis exceeds the pyramid threshold.
"""
return np.log2(shape) >= 13
def get_pyramid_and_rgb(data, pyramid=None, rgb=None):
"""Check if data is or needs to be a pyramid and make one if needed.
Parameters
----------
data : array, list, or tuple
Data to be checked if pyramid or if needs to be turned into a pyramid.
pyramid : bool, optional
Value that can force data to be considered as a pyramid or not,
otherwise computed.
rgb : bool, optional
Value that can force data to be considered as a rgb, otherwise
computed.
Returns
-------
ndim : int
Dimensionality of the data.
rgb : bool
If data is rgb.
pyramid : bool
If data is a pyramid or a pyramid has been generated.
data_pyramid : list or None
If None then data is not and does not need to be a pyramid. Otherwise
is a list of arrays where each array is a level of the pyramid.
"""
# Determine if data currently is a pyramid
currently_pyramid = is_pyramid(data)
if currently_pyramid:
shapes = [d.shape for d in data]
init_shape = shapes[0]
else:
init_shape = data.shape
# Determine if rgb, and determine dimensionality
if rgb is False:
pass
else:
# If rgb is True or None then guess if rgb
# allowed or not, and if allowed set it to be True
rgb_guess = is_rgb(init_shape)
if rgb and rgb_guess is False:
raise ValueError(
"Non rgb or rgba data was passed, but rgb data was"
" requested."
)
else:
rgb = rgb_guess
if rgb:
ndim = len(init_shape) - 1
else:
ndim = len(init_shape)
if pyramid is False:
if currently_pyramid:
raise ValueError(
"Non pyramided data was requested, but pyramid"
" data was passed"
)
else:
data_pyramid = None
else:
if currently_pyramid:
data_pyramid = trim_pyramid(data)
pyramid = True
else:
# Guess if data should be pyramid or if a pyramid was requested
if pyramid:
pyr_axes = [True] * ndim
else:
pyr_axes = should_be_pyramid(data.shape)
if np.any(pyr_axes):
pyramid = True
# Set axes to be downsampled to have a factor of 2
downscale = np.ones(len(data.shape))
downscale[pyr_axes] = 2
largest = np.min(np.array(data.shape)[pyr_axes])
# Determine number of downsample steps needed
max_layer = np.floor(np.log2(largest) - 9).astype(int)
data_pyramid = fast_pyramid(
data, downscale=downscale, max_layer=max_layer
)
data_pyramid = trim_pyramid(data_pyramid)
else:
data_pyramid = None
pyramid = False
return ndim, rgb, pyramid, data_pyramid
def fast_pyramid(data, downscale=2, max_layer=None):
"""Compute fast image pyramid.
In the interest of speed this method subsamples, rather than downsamples,
the input image.
Parameters
----------
data : array
Data from which pyramid is to be generated.
downscale : int or list
Factor to downscale each step of the pyramid by. If a list, one value
must be provided for every axis of the array.
max_layer : int, optional
The maximum number of layers of the pyramid to be created.
Returns
-------
pyramid : list
List of arrays where each array is a level of the generated pyramid.
"""
if max_layer is None:
max_layer = np.floor(np.log2(np.max(data.shape))).astype(int) + 1
zoom_factor = np.divide(1, downscale)
pyramid = [data]
for i in range(max_layer - 1):
pyramid.append(
ndi.zoom(pyramid[i], zoom_factor, prefilter=False, order=0)
)
return pyramid
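# Example (sketch): a 3-level pyramid from a 256x256 image; each level is
# subsampled by a factor of 2 along both axes.
# >>> data = np.random.random((256, 256))
# >>> [p.shape for p in fast_pyramid(data, downscale=2, max_layer=3)]
# [(256, 256), (128, 128), (64, 64)]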
def increment_unnamed_colormap(name, names):
"""Increment name for unnamed colormap.
Parameters
----------
name : str
Name of colormap to be incremented.
names : str
Names of existing colormaps.
Returns
-------
name : str
Name of colormap after incrementing.
"""
if name == '[unnamed colormap]':
past_names = [n for n in names if n.startswith('[unnamed colormap')]
name = f'[unnamed colormap {len(past_names)}]'
return name
def calc_data_range(data):
"""Calculate range of data values. If all values are equal return [0, 1].
Parameters
-------
data : array
Data to calculate range of values over.
Returns
-------
values : list of float
Range of values.
Notes
-----
If the data type is uint8, no calculation is performed, and 0-255 is
returned.
"""
if data.dtype == np.uint8:
return [0, 255]
if np.prod(data.shape) > 1e6:
# If data is very large take the average of the top, bottom, and
# middle slices
bottom_plane_idx = (0,) * (data.ndim - 2)
middle_plane_idx = tuple(s // 2 for s in data.shape[:-2])
top_plane_idx = tuple(s - 1 for s in data.shape[:-2])
idxs = [bottom_plane_idx, middle_plane_idx, top_plane_idx]
reduced_data = [
[np.max(data[idx]) for idx in idxs],
[np.min(data[idx]) for idx in idxs],
]
else:
reduced_data = data
min_val = np.min(reduced_data)
max_val = np.max(reduced_data)
if min_val == max_val:
min_val = 0
max_val = 1
return [float(min_val), float(max_val)]
def compute_max_shape(shapes, max_dims=None):
"""Computes the maximum shape combination from the given shapes.
Parameters
----------
shapes : iterable of tuple
        Shapes to combine.
max_dims : int, optional
Pre-computed maximum dimensions of the final shape.
If None, is computed on the fly.
Returns
-------
max_shape : tuple
Maximum shape combination.
"""
shapes = tuple(shapes)
if max_dims is None:
max_dims = max(len(shape) for shape in shapes)
max_shape = [0] * max_dims
for dim in range(max_dims):
for shape in shapes:
try:
dim_len = shape[dim]
except IndexError:
pass
else:
if dim_len > max_shape[dim]:
max_shape[dim] = dim_len
return tuple(max_shape)
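# Example (sketch): shorter shapes simply do not contribute to the trailing
# dimensions of the combined shape.
# >>> compute_max_shape([(10, 20), (5, 30), (2, 10, 25)])
# (10, 30, 25)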
def formatdoc(obj):
"""Substitute globals and locals into an object's docstring."""
frame = inspect.currentframe().f_back
try:
obj.__doc__ = obj.__doc__.format(
**{**frame.f_globals, **frame.f_locals}
)
return obj
finally:
del frame
def segment_normal(a, b, p=(0, 0, 1)):
"""Determines the unit normal of the vector from a to b.
Parameters
----------
a : np.ndarray
Length 2 array of first point or Nx2 array of points
b : np.ndarray
Length 2 array of second point or Nx2 array of points
p : 3-tuple, optional
orthogonal vector for segment calculation in 3D.
Returns
-------
    unit_norm : np.ndarray
        Length 2 array of the unit normal of the vector from a to b, or an
        Nx2 array of such vectors. If a == b, the normal is [0, 0].
"""
d = b - a
if d.ndim == 1:
if len(d) == 2:
normal = np.array([d[1], -d[0]])
else:
normal = np.cross(d, p)
norm = np.linalg.norm(normal)
if norm == 0:
norm = 1
else:
if d.shape[1] == 2:
            normal = np.stack([d[:, 1], -d[:, 0]], axis=0).transpose(1, 0)
        else:
            normal = np.cross(d, p)
        norm = np.linalg.norm(normal, axis=1, keepdims=True)
        norm[norm == 0] = 1
    unit_norm = normal / norm
    return unit_norm
#!/usr/bin/env python
# coding: utf-8
# # Model Specification
# This example solve a standard AR(1) process but with multiple noise measurements. If there are many parameters, we need more data for proper estimation.
# In[11]:
import numpy as np
import pandas as pd
import linkalman
import scipy
from linkalman.models import BaseConstantModel as BCM
from linkalman.core.utils import gen_PSD
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import datetime
from copy import deepcopy
# # Unrestricted Parametrization of Covariance Matrices
# Sometimes we want to let a covariance matrix (e.g. `R`) be fully parametrized without restrictions (e.g. the PSD requirement on `R`). Here I use `linkalman.core.utils.gen_PSD` to achieve this. It uses a Cholesky decomposition with strictly positive diagonal values to achieve a unique and restriction-free parametrization.
# In[12]:
def my_f(theta):
"""
    AR(1) state equation observed through two noisy measurements.
"""
# Define theta
f = 1 / (1 + np.exp(theta[3]))
sigma = np.exp(theta[4])
# Generate F
F = np.array([[f]])
# Generate Q
Q = np.array([[sigma]])
# Generate R, set to 0 to be consistent with AR(1) process
R = gen_PSD(theta[0:3], 2) # need three parameters to define a 2-by-2 R
# Generate H
H = np.array([[1], [theta[5]]]) # enforce one coefficient to be 1 to make the system more identifiable.
# Generate D
D = np.array([[theta[6]], [theta[7]]])
# Collect system matrices
M = {'F': F, 'Q': Q, 'H': H, 'R': R, 'D': D}
return M
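# Quick sanity check (sketch): by construction gen_PSD returns a PSD matrix
# and the logistic reparametrization keeps |f| < 1, i.e. a stationary AR(1).
# >>> M = my_f(np.zeros(8))
# >>> bool(np.all(np.linalg.eigvalsh(M['R']) > -1e-9))
# True
# >>> bool(abs(M['F'][0, 0]) < 1)
# True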
# In[13]:
def my_solver(param, obj_func, verbose=False, **kwargs):
"""
Simple solver for LLY
"""
obj_ = lambda x: -obj_func(x)
def disp_f(x):
print('theta is {}. Function value is: {}.'.format(x, obj_func(x)))
callbackf = None
if verbose:
callbackf = disp_f
res = minimize(obj_, param, callback=callbackf, **kwargs)
theta_opt = np.array(res.x)
fval_opt = res.fun
return theta_opt, fval_opt
# In[14]:
# Initialize the model
x = 1 # used to calculate stationary mean
model = BCM()
model.set_f(my_f, x_0=x * np.ones([1, 1]))
model.set_solver(my_solver, method='nelder-mead',
options={'xatol': 1e-8, 'maxfev': 200}, verbose=False)
# # Generate Synthetic Data
# Same as the standard setup, but I cross off some measurements during the training period and see how `linkalman` handles them. I generate some partially missing data for each of the measurements.
# In[15]:
# Some initial parameters
theta = np.array([0.1, 0.3, 0.1, -0.5, -0.1, 2, 4, 5])
T = 3000
train_split_ratio = 0.7
forecast_cutoff_ratio = 0.8
missing_range_1st = [0.3, 0.4] # range of missing for the first measurement
missing_range_2nd_end = 0.5 # end ratio of missing for the second measurement
# Split train data
train_split_t = np.floor(T * train_split_ratio).astype(int)
# Generate missing data for forecasting
forecast_t = np.floor(T * forecast_cutoff_ratio).astype(int)
# If we want AR(1) with non-zero stationary mean, we should provide a constant
x_col = ['const']
Xt = pd.DataFrame({x_col[0]: x * np.ones(T)})
import openml
import numpy as np
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from torch.utils.data import Dataset
def simple_lapsed_time(text, lapsed):
hours, rem = divmod(lapsed, 3600)
minutes, seconds = divmod(rem, 60)
print(text+": {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
def task_dset_ids(task):
dataset_ids = {
'binary': [1487,44,1590,42178,1111,31,42733,1494,1017,4134],
'multiclass': [188, 1596, 4541, 40664, 40685, 40687, 40975, 41166, 41169, 42734],
'regression':[541, 42726, 42727, 422, 42571, 42705, 42728, 42563, 42724, 42729]
}
return dataset_ids[task]
def concat_data(X,y):
# import ipdb; ipdb.set_trace()
return pd.concat([pd.DataFrame(X['data']), pd.DataFrame(y['data'][:,0].tolist(),columns=['target'])], axis=1)
def data_split(X,y,nan_mask,indices):
x_d = {
'data': X.values[indices],
'mask': nan_mask.values[indices]
}
if x_d['data'].shape != x_d['mask'].shape:
        raise ValueError('Shape of data not same as that of nan mask!')
y_d = {
'data': y[indices].reshape(-1, 1)
}
return x_d, y_d
def data_prep_CBC(seed, task, datasplit=[.65, .15, .2]):
np.random.seed(seed)
#Load data
CBC_file_dir = "data/ProcessedData-2021-Filtrados.csv"
CBC = pd.read_csv(CBC_file_dir, error_bad_lines=True)
CBC = CBC[CBC['Clase'] != 2]
CBC['Clase'] = CBC['Clase'].replace(to_replace = 3, value = 2)
CBC['Clase'] = CBC['Clase'].replace(to_replace = 4, value = 2)
healthy = CBC.loc[CBC['Clase'] == 0]
thalassemias = CBC.loc[CBC['Clase'] == 1]
anemias = CBC.loc[CBC['Clase'] == 2]
CBC = pd.concat([healthy,thalassemias, anemias])
y = CBC['Clase']
CBC = CBC.drop('Clase', axis=1)
CBC = CBC.drop('TipoClase', axis=1)
X = CBC
    categorical_indicator = [False] * X.shape[1]
    categorical_columns = X.columns[list(np.where(np.array(categorical_indicator) == True)[0])].tolist()
import random
import numpy as np
from PyQt5.QtWidgets import QMainWindow, QApplication
from OpenGL import GL, GLU, GLUT
import logging
from ui.mainwindow import Ui_MainWindow
def random_rgb():
"""
Get random bright enough color
"""
color = [random.random() for _ in range(4)]
if sum(color[:3]) / 3 < 0.5:
return random_rgb()
else:
return color
def cart2pol(x, y):
r = np.sqrt(x ** 2 + y ** 2)
ang = np.arctan2(y, x)
return r, ang
def pol2cart(r, ang):
x = r * np.cos(ang)
y = r * np.sin(ang)
return x, y
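# Example (sketch): cartesian -> polar -> cartesian roundtrip.
# >>> x, y = pol2cart(*cart2pol(0.3, 0.4))
# >>> round(x, 6), round(y, 6)
# (0.3, 0.4)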
# noinspection PyPep8Naming
class MainWindow(QMainWindow, Ui_MainWindow):
generated_points = []
generated_colors = []
def __init__(self, parent=None):
super().__init__(parent)
self.setupUi(self)
self.primitiveComboBox.activated.connect(self.resetRandomAndUpdate)
self.AlphaSlider.valueChanged.connect(self.openGLWidget.update)
self.AlphaComboBox.activated.connect(self.openGLWidget.update)
self.blendSFactor.activated.connect(self.openGLWidget.update)
self.blendDFactor.activated.connect(self.openGLWidget.update)
self.updateButton.clicked.connect(self.resetRandomAndUpdate)
self.XScissorSlider.valueChanged.connect(self.openGLWidget.update)
self.YScissorSlider.valueChanged.connect(self.openGLWidget.update)
self.openGLWidget.initializeGL()
self.openGLWidget.paintGL = self.paintGL
self.actionsDict = {
"GL_POINTS": self.paintGL_random,
"GL_LINES": self.paintGL_random,
"GL_LINE_STRIP": self.paintGL_random,
"GL_LINE_LOOP": self.paintGL_random,
"GL_TRIANGLES": self.paintGL_random,
"GL_TRIANGLE_STRIP": self.paintGL_circular_random,
"GL_TRIANGLE_FAN": self.paintGL_circular_random,
"GL_QUADS": self.paintGL_quads,
"GL_QUAD_STRIP": self.paintGL_quad_strip,
"GL_POLYGON": self.paintGL_polygon,
"Фрактал": self.paintGL_fractal,
"<NAME>": self.paintGL_spline
}
def resetRandomAndUpdate(self):
self.fractalLevelSpinBox.setEnabled(False)
self.generated_points = []
self.generated_colors = []
self.openGLWidget.update()
def glScissorTest(self):
GL.glEnable(GL.GL_SCISSOR_TEST)
# print(self.x_scissor, self.y_scissor)
x_scissor = self.XScissorSlider.value() / 100
y_scissor = self.YScissorSlider.value() / 100
GL.glScissor(
int(x_scissor * self.openGLWidget.width()),
int(y_scissor * self.openGLWidget.height()),
self.openGLWidget.width(),
self.openGLWidget.height())
def glAlphaTest(self):
GL.glEnable(GL.GL_ALPHA_TEST)
alpha_method = self.AlphaComboBox.currentText()
alpha_value = self.AlphaSlider.value() / 100
GL.glAlphaFunc(getattr(GL, alpha_method), alpha_value)
def glBlendTest(self):
GL.glEnable(GL.GL_BLEND)
sFactor = self.blendSFactor.currentText()
dFactor = self.blendDFactor.currentText()
GL.glBlendFunc(getattr(GL, sFactor), getattr(GL, dFactor))
def loadScene(self):
width, height = self.openGLWidget.width(), self.openGLWidget.height()
GL.glViewport(0, 0, width, height)
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GL.glOrtho(0, 1, 0, 1, -1, 1)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
def paintGL(self):
# print('Paint')
self.loadScene()
self.glScissorTest()
self.glAlphaTest()
self.glBlendTest()
self.actionsDict[self.primitiveComboBox.currentText()]()
def paintGL_random(self):
GL.glPointSize(2)
random_dict = {
"GL_POINTS": {
"GL_MODE": GL.GL_POINTS,
"POINTS_NUM": 50
},
"GL_LINES": {
"GL_MODE": GL.GL_LINES,
"POINTS_NUM": 5 * 2
},
"GL_LINE_STRIP": {
"GL_MODE": GL.GL_LINE_STRIP,
"POINTS_NUM": 5
},
"GL_LINE_LOOP": {
"GL_MODE": GL.GL_LINE_LOOP,
"POINTS_NUM": 5
},
"GL_TRIANGLES": {
"GL_MODE": GL.GL_TRIANGLES,
"POINTS_NUM": 3 * 3
}
}
GL.glBegin(
random_dict[self.primitiveComboBox.currentText()]
['GL_MODE'])
self.drawRandomPoints(
random_dict
[self.primitiveComboBox.currentText()]
['POINTS_NUM'])
GL.glEnd()
GL.glFinish()
def drawRandomPoints(self, number):
if len(self.generated_points) == 0:
self.generated_points = [
(np.random.random(), np.random.random()) for _ in range(number)]
self.generated_colors = [random_rgb() for _ in range(number)]
self.placeGeneratedPoints()
def placeGeneratedPoints(self):
for point, color in zip(self.generated_points, self.generated_colors):
GL.glColor4d(*color)
GL.glVertex2d(*point)
def paint(self, X1, Y1, X2, Y2):
line11 = []
line12 = []
line13 = []
start = (0.1, 0.1)
finish = (0.9, 0.1)
draggable1 = (X1, Y1)
draggable2 = (X2, Y2)
GL.glBegin(GL.GL_POINTS)
GL.glVertex2d(*draggable2)
GL.glVertex2d(*draggable1)
GL.glEnd()
GL.glBegin(GL.GL_LINE_STRIP)
for i in range(100):
line11.append(((draggable1[0] - start[0])/100*i + start[0],
(draggable1[1] - start[1])/100*i + start[1]))
line12.append(
((draggable2[0] - draggable1[0]) / 100 * i + draggable1[0],
(draggable2[1] - draggable1[1]) / 100 * i + draggable1[1]))
line13.append(
((finish[0] - draggable2[0]) / 100 * i + draggable2[0],
(finish[1] - draggable2[1]) / 100 * i + draggable2[1]))
line21 = []
line22 = []
for i in range(100):
dot1 = line11[i]
dot2 = line12[i]
dot3 = line13[i]
line21.append(((dot2[0] - dot1[0]) / 100 * i + dot1[0],
(dot2[1] - dot1[1]) / 100 * i + dot1[1]))
line22.append(((dot3[0] - dot2[0]) / 100 * i + dot2[0],
(dot3[1] - dot2[1]) / 100 * i + dot2[1]))
for i in range(100):
dot21 = line21[i]
dot22 = line22[i]
GL.glVertex2d((dot22[0] - dot21[0]) / 100 * i + dot21[0],
(dot22[1] - dot21[1]) / 100 * i + dot21[1])
GL.glEnd()
GL.glFinish()
def paintGL_spline(self):
self.paint(0.25, 0.75, 0.75, 0.75)
def paintGL_circular_random(self):
random_dict = {
"GL_TRIANGLE_STRIP": GL.GL_TRIANGLE_STRIP,
"GL_TRIANGLE_FAN": GL.GL_TRIANGLE_FAN,
}
acc_angle = 0
N = 5
if len(self.generated_points) == 0:
self.generated_colors.append(random_rgb())
self.generated_points.append((0.5, 0.5))
max_rad = 0.5
self.generated_colors.extend([random_rgb() for _ in range(N)])
for _ in range(N):
r = np.random.random() * max_rad
acc_angle += random.random() * 360 / N
x, y = pol2cart(r, acc_angle / 180 * np.pi)
x += 0.5
y += 0.5
self.generated_points.append((x, y))
GL.glPointSize(2)
GL.glBegin(random_dict[self.primitiveComboBox.currentText()])
self.placeGeneratedPoints()
GL.glEnd()
GL.glFinish()
def paintGL_quads(self):
N = 4
if len(self.generated_points) == 0:
for _ in range(N):
c_x, c_y = np.random.random() * 0.98 + 0.01, np.random.random() * 0.98 + 0.01
max_rad = min(c_x, 1 - c_x, c_y, 1 - c_y)
acc_angle = 0
r = np.random.random() * max_rad
self.generated_colors.extend([random_rgb() for _ in range(4)])
for _ in range(4):
acc_angle += random.random() * 360 / 4
x, y = pol2cart(r, acc_angle / 180 * np.pi)
x += c_x
y += c_y
self.generated_points.append((x, y))
GL.glPointSize(2)
GL.glBegin(GL.GL_QUADS)
self.placeGeneratedPoints()
GL.glEnd()
GL.glFinish()
def paintGL_quad_strip(self):
N = 4
if len(self.generated_points) == 0:
y_s = sorted([np.random.random() for _ in range(N)])
x_s = [[np.random.random() for _ in range(2)] for _ in range(N)]
[l.sort() for l in x_s]
self.generated_colors = [random_rgb() for _ in range(N * 2)]
for i in range(N):
self.generated_points.append((x_s[i][0], y_s[i]))
self.generated_points.append((x_s[i][1], y_s[i]))
GL.glPointSize(2)
GL.glBegin(GL.GL_QUAD_STRIP)
self.placeGeneratedPoints()
GL.glEnd()
GL.glFinish()
def paintGL_polygon(self):
N = 8
if len(self.generated_points) == 0:
acc_angle = 0
r = np.random.random() * 0.5
self.generated_colors = [random_rgb() for _ in range(N)]
for _ in range(N):
acc_angle += random.random() * 360 / N
x, y = pol2cart(r, acc_angle / 180 * np.pi)
x += 0.5
y += 0.5
self.generated_points.append((x, y))
GL.glPointSize(2)
GL.glBegin(GL.GL_POLYGON)
self.placeGeneratedPoints()
GL.glEnd()
GL.glFinish()
def paintGL_fractal(self):
self.fractalLevelSpinBox.setEnabled(True)
GL.glBegin(GL.GL_LINE_STRIP)
start = (0.20, 0.5)
end = (0.80, 0.5)
self.drawFractal(*start, *end, level=self.fractalLevelSpinBox.value())
GL.glEnd()
def drawFractal(self, x1, y1, x2, y2, level=1, recursive=False):
p1, p2 = np.array((x1, y1)), np.array((x2, y2))
vec = p2 - p1
coef = np.linalg.norm(vec)
center = p1 + vec / 2
angle = cart2pol(*vec)[1]
        rotate_matr = np.array([  # rotation matrix
            [np.cos(angle), -np.sin(angle)],
            [np.sin(angle), np.cos(angle)]])
import numpy as np
import scipy as sp
def reshape_dims(M,dims=None):
num_dim = np.ndim(M)
if num_dim ==3:
M1r= M.reshape((np.prod(dims[:2]),dims[2]),order='F')
elif num_dim ==2:
M1r = M.reshape(dims,order='F')
return M1r
def remove_trend(Y_rm,detrend_option='linear'):
mean_pixel = Y_rm.mean(axis=1, keepdims=True)
Y_rm2 = Y_rm - mean_pixel
# Detrend
if detrend_option=='linear':
detr_data = sp.signal.detrend(Y_rm2,axis=1,type='l')
#elif detrend_option=='quad':
#detr_data = detrend(Y_rm)
else:
print('Add option')
Y_det = detr_data + mean_pixel
offset = Y_rm - Y_det
return Y_det, offset
def unpad(x):
"""
    Given a padded matrix with nan,
    remove all columns and rows that consist entirely of nan.
Parameters:
----------
x: np.array
array to unpad (all nan values)
Outputs:
-------
x: np.array
unpaded array (will not contain nan values)
dimension might be different from input array
"""
x = x[:, ~np.isnan(x).all(0)]
x = x[~np.isnan(x).all(1)]
return x
def pad(array, reference_shape, offsets, array_type=np.nan):
"""
    Pad array wrt reference_shape at the given offsets with dtype=array_type
Parameters:
----------
array: np.array
array to be padded
reference_shape:tuple
size of narray to create
offsets: tuple
list of offsets (number of elements must be equal
to the dimension of the array)
will throw a ValueError if offsets is too big and the
reference_shape cannot handle the offsets
array_type: dtype
data type to pad array with.
Outputs:
-------
result: np.array (reference_shape)
padded array given input
"""
    # Create an array filled with array_type (e.g. nan) of the reference shape
    result = np.ones(reference_shape) * array_type
    # Create a list of slices from offset to offset + shape in each dimension
    insertHere = [slice(offsets[dim], offsets[dim] + array.shape[dim])
                  for dim in range(array.ndim)]
    # Insert the array in the result at the specified offsets
    result[tuple(insertHere)] = array
    return result
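# Round-trip example (hypothetical shapes): embed a 2x2 block into a 4x4
# nan-padded canvas at offset (1, 1), then recover it with unpad:
#   a = np.arange(4.).reshape(2, 2)
#   assert np.array_equal(unpad(pad(a, (4, 4), (1, 1))), a)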
def nextpow2(value):
"""
Extracted from
caiman.source_extraction.cnmf.deconvolution import axcov
Find exponent such that 2^exponent is >= abs(value).
Parameters:
----------
value : int
Returns:
-------
exponent : int
"""
exponent = 0
avalue = np.abs(value)
while avalue > np.power(2, exponent):
exponent += 1
return exponent
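# Example: nextpow2(5) == 3, since 2**3 = 8 is the smallest power of two >= 5.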
def axcov(data, maxlag=10):
"""
Edited from cnmf.deconvolution
Compute the autocovariance of data at lag = -maxlag:0:maxlag
Parameters:
----------
data : array
Array containing fluorescence data
maxlag : int
Number of lags to use in autocovariance calculation
Output:
-------
axcov : array
Autocovariances computed from -maxlag:0:maxlag
"""
data = data - np.mean(data)
T = len(data)
bins = np.size(data)
xcov = np.fft.fft(data, np.power(2, nextpow2(2 * bins - 1)))
xcov = np.fft.ifft(np.square(np.abs(xcov)))
xcov = np.concatenate([xcov[np.arange(xcov.size - maxlag, xcov.size)],
xcov[np.arange(0, maxlag + 1)]])
return np.real(np.divide(xcov, T))
#### SOME FILTERS
def low_pass_weights(window, cutoff):
"""Calculate weights for a low pass Lanczos filter.
Args:
window: int
The length of the filter window.
cutoff: float
The cutoff frequency in inverse time steps.
"""
order = ((window - 1) // 2 ) + 1
nwts = 2 * order + 1
    w = np.zeros([nwts])
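    # The source truncates here; the remainder below follows the standard
    # Lanczos low-pass weight formula and is a reconstruction, not the
    # original lines.
    n = nwts // 2
    w[n] = 2 * cutoff
    k = np.arange(1., n)
    sigma = np.sin(np.pi * k / n) * n / (np.pi * k)
    firstfactor = np.sin(2. * np.pi * cutoff * k) / (np.pi * k)
    w[n - 1:0:-1] = firstfactor * sigma
    w[n + 1:-1] = firstfactor * sigma
    return w[1:-1]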
import numpy as np
import tensorflow as tf
from util import concrete_shape
from transforms import Simplex, Logit
"""
Utility methods for defining constrained variables as transforms of an unconstrained parameterization.
"""
def unconstrained(shape=None, init=None, name=None):
if init is None:
shape = concrete_shape(shape)
init = np.float32(np.random.randn(*shape))
val = tf.Variable(init, name=name)
return val
def unconstrained_zeros(shape=None, name=None):
shape = concrete_shape(shape)
init = np.float32(np.zeros(shape))
val = tf.Variable(init, name=name)
return val
def unconstrained_small(shape=None, name=None):
shape = concrete_shape(shape)
init = np.float32(np.random.randn(*shape) * 1e-6)
val = tf.Variable(init, name=name)
return val
def unconstrained_scale(shape=None, name=None):
shape = concrete_shape(shape)
init = np.float32(np.random.randn(*shape))
val = tf.Variable(init, name=name)
scale = tf.Variable(np.float32(1e-6), name=name)
return val * scale
def simplex_constrained(shape=None, init_log=None, name=None):
if init_log is None:
shape = concrete_shape(shape)
init_log = np.float32(np.random.randn(*shape))
log_value = tf.Variable(init_log, name= "log_"+name if name is not None else None)
return Simplex.transform(log_value)
def unit_interval(shape=None, init_log=None, name=None):
# Defines a matrix each element of which is in the unit interval.
# This is different from simplex_constrained which defines a
# vector guaranteed to be in the unit simplex.
if init_log is None:
shape = concrete_shape(shape)
init_log = np.float32(np.random.randn(*shape))
log_value = tf.Variable(init_log, name= "log_"+name if name is not None else None)
return Logit.transform(log_value)
def positive_exp(shape=None, init_log=None, name=None):
# a Tensor of values that are pointwise positive, represented by an exponential
if init_log is None:
shape = concrete_shape(shape)
init_log = np.float32(np.ones(shape) * -10)
log_value = tf.Variable(init_log, name= "log_"+name if name is not None else None)
pos_value = tf.exp(tf.clip_by_value(log_value, -42, 42), name=name)
return pos_value
def psd_matrix(shape=None, init=None, name=None):
assert(init is None) # TODO figure out init semantics
n, n2 = shape
assert(n==n2)
    init = np.float32(np.eye(n))
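    # Truncated in the source. A common completion (an assumption): represent
    # the PSD matrix through an unconstrained factor L and return L @ L^T.
    L = tf.Variable(init, name="sqrt_" + name if name is not None else None)
    return tf.matmul(L, tf.transpose(L), name=name)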
import numpy as np
from frankapy import FrankaArm, SensorDataMessageType
from frankapy import FrankaConstants as FC
from frankapy.proto_utils import sensor_proto2ros_msg, make_sensor_group_msg
from frankapy.proto import ForcePositionSensorMessage, ForcePositionControllerSensorMessage
from franka_interface_msgs.msg import SensorDataGroup
from frankapy.utils import transform_to_list, min_jerk
from tqdm import trange
import rospy
if __name__ == "__main__":
fa = FrankaArm()
fa.reset_joints()
fa.close_gripper()
while True:
        input('Press [Enter] to enter guide mode and move robot to be on top of a flat surface.')
fa.run_guide_mode()
while True:
inp = input('[r]etry or [c]ontinue? ')
if inp not in ('r', 'c'):
print('Please give valid input!')
else:
break
if inp == 'c':
break
rospy.loginfo('Generating Trajectory')
# EE will follow a 2D circle while pressing down with a target force
dt = 0.01
T = 10
    ts = np.arange(0, T, dt)
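    # Truncated in the source. A hedged sketch of the trajectory setup implied
    # by the comment above; the radius, force target, and use of fa.get_pose()
    # as the circle centre are illustrative assumptions, not the original code.
    p0 = fa.get_pose()
    circ_radius = 0.04
    thetas = 2 * np.pi * ts / T
    xs = p0.translation[0] + circ_radius * (np.cos(thetas) - 1)
    ys = p0.translation[1] + circ_radius * np.sin(thetas)
    target_force = [0, 0, -10, 0, 0, 0]  # press down along -z (assumed value)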
"""
The microstructure module provide elementary classes to describe a
crystallographic granular microstructure such as mostly present in
metallic materials.
It contains several classes which are used to describe a microstructure
composed of several grains, each one having its own crystallographic
orientation:
* :py:class:`~pymicro.crystal.microstructure.Microstructure`
* :py:class:`~pymicro.crystal.microstructure.Grain`
* :py:class:`~pymicro.crystal.microstructure.Orientation`
"""
import numpy as np
import os
import vtk
import h5py
import math
from pathlib import Path
from scipy import ndimage
from matplotlib import pyplot as plt, colors
from pymicro.crystal.lattice import Lattice, Symmetry, CrystallinePhase, Crystal
from pymicro.crystal.quaternion import Quaternion
from pymicro.core.samples import SampleData
import tables
from math import atan2, pi
class Orientation:
"""Crystallographic orientation class.
This follows the passive rotation definition which means that it brings
the sample coordinate system into coincidence with the crystal coordinate
system. Then one may express a vector :math:`V_c` in the crystal coordinate
system from the vector in the sample coordinate system :math:`V_s` by:
.. math::
V_c = g.V_s
and inversely (because :math:`g^{-1}=g^T`):
.. math::
V_s = g^T.V_c
Most of the code to handle rotations has been written to comply with the
conventions laid in :cite:`Rowenhorst2015`.
"""
def __init__(self, matrix):
"""Initialization from the 9 components of the orientation matrix."""
g = np.array(matrix, dtype=np.float64).reshape((3, 3))
self._matrix = g
self.euler = Orientation.OrientationMatrix2Euler(g)
self.rod = Orientation.OrientationMatrix2Rodrigues(g)
self.quat = Orientation.OrientationMatrix2Quaternion(g, P=1)
def orientation_matrix(self):
"""Returns the orientation matrix in the form of a 3x3 numpy array."""
return self._matrix
def __repr__(self):
"""Provide a string representation of the class."""
s = 'Crystal Orientation \n-------------------'
s += '\norientation matrix = \n %s' % self._matrix.view()
s += '\nEuler angles (degrees) = (%8.3f,%8.3f,%8.3f)' % (self.phi1(), self.Phi(), self.phi2())
s += '\nRodrigues vector = %s' % self.OrientationMatrix2Rodrigues(self._matrix)
s += '\nQuaternion = %s' % self.OrientationMatrix2Quaternion(self._matrix, P=1)
return s
def to_crystal(self, v):
"""Transform a vector or a matrix from the sample frame to the crystal
frame.
:param ndarray v: a 3 component vector or a 3x3 array expressed in
the sample frame.
:return: the vector or matrix expressed in the crystal frame.
"""
        if v.size not in [3, 9]:
            raise ValueError('input arg must be a 3 components vector '
                             'or a 3x3 matrix, got %d values' % v.size)
        g = self.orientation_matrix()
        if v.size == 3:
            # input is vector
            return np.dot(g, v)
        else:
            # input is 3x3 matrix
            return np.dot(g, np.dot(v, g.T))
def to_sample(self, v):
"""Transform a vector or a matrix from the crystal frame to the sample
frame.
:param ndarray v: a 3 component vector or a 3x3 array expressed in
the crystal frame.
:return: the vector or matrix expressed in the sample frame.
"""
        if v.size not in [3, 9]:
            raise ValueError('input arg must be a 3 components vector '
                             'or a 3x3 matrix, got %d values' % v.size)
        g = self.orientation_matrix()
        if v.size == 3:
            # input is vector
            return np.dot(g.T, v)
        else:
            # input is 3x3 matrix
            return np.dot(g.T, np.dot(v, g))
@staticmethod
def cube():
"""Create the particular crystal orientation called Cube and which
corresponds to euler angle (0, 0, 0)."""
return Orientation.from_euler((0., 0., 0.))
@staticmethod
def brass():
"""Create the particular crystal orientation called Brass and which
corresponds to euler angle (35.264, 45, 0)."""
return Orientation.from_euler((35.264, 45., 0.))
@staticmethod
def copper():
"""Create the particular crystal orientation called Copper and which
corresponds to euler angle (90, 35.264, 45)."""
return Orientation.from_euler((90., 35.264, 45.))
@staticmethod
def s3():
"""Create the particular crystal orientation called S3 and which
corresponds to euler angle (59, 37, 63)."""
return Orientation.from_euler((58.980, 36.699, 63.435))
@staticmethod
def goss():
"""Create the particular crystal orientation called Goss and which
corresponds to euler angle (0, 45, 0)."""
return Orientation.from_euler((0., 45., 0.))
@staticmethod
def shear():
"""Create the particular crystal orientation called shear and which
corresponds to euler angle (45, 0, 0)."""
return Orientation.from_euler((45., 0., 0.))
@staticmethod
def random():
"""Create a random crystal orientation."""
from random import random
from math import acos
phi1 = random() * 360.
Phi = 180. * acos(2 * random() - 1) / np.pi
phi2 = random() * 360.
return Orientation.from_euler([phi1, Phi, phi2])
def ipf_color(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic, saturate=True):
"""Compute the IPF (inverse pole figure) colour for this orientation.
        This method has been adapted from the DCT code.
.. note::
This method coexist with the `get_ipf_colour` for the moment.
:param ndarray axis: the direction to use to compute the IPF colour.
:param Symmetry symmetry: the symmetry operator to use.
        :param bool saturate: a flag to saturate the RGB values.
"""
axis /= np.linalg.norm(axis)
Vc = np.dot(self.orientation_matrix(), axis)
# get the symmetry operators
syms = symmetry.symmetry_operators()
syms = np.concatenate((syms, -syms))
Vc_syms = np.dot(syms, Vc)
# phi: rotation around 001 axis, from 100 axis to Vc vector, projected on (100,010) plane
Vc_phi = np.arctan2(Vc_syms[:, 1], Vc_syms[:, 0]) * 180 / pi
# chi: rotation around 010 axis, from 001 axis to Vc vector, projected on (100,001) plane
Vc_chi = np.arctan2(Vc_syms[:, 0], Vc_syms[:, 2]) * 180 / pi
# psi : angle from 001 axis to Vc vector
Vc_psi = np.arccos(Vc_syms[:, 2]) * 180 / pi
if symmetry is Symmetry.cubic:
angleR = 45 - Vc_chi # red color proportional to (45 - chi)
minAngleR = 0
maxAngleR = 45
angleB = Vc_phi # blue color proportional to phi
minAngleB = 0
maxAngleB = 45
elif symmetry is Symmetry.hexagonal:
angleR = 90 - Vc_psi # red color proportional to (90 - psi)
minAngleR = 0
maxAngleR = 90
angleB = Vc_phi # blue color proportional to phi
minAngleB = 0
maxAngleB = 30
else:
raise(ValueError('unsupported crystal symmetry to compute IPF color'))
# find the axis lying in the fundamental zone
fz_list = ((angleR >= minAngleR) & (angleR < maxAngleR) &
(angleB >= minAngleB) & (angleB < maxAngleB)).tolist()
        if not fz_list.count(True) == 1:
            raise ValueError('problem moving to the fundamental zone')
i_SST = fz_list.index(True)
r = angleR[i_SST] / maxAngleR
g = (maxAngleR - angleR[i_SST]) / maxAngleR * (maxAngleB - angleB[i_SST]) / maxAngleB
b = (maxAngleR - angleR[i_SST]) / maxAngleR * angleB[i_SST] / maxAngleB
rgb = np.array([r, g, b])
if saturate:
rgb = rgb / rgb.max()
return rgb
def get_ipf_colour(self, axis=np.array([0., 0., 1.]), symmetry=Symmetry.cubic):
"""Compute the IPF (inverse pole figure) colour for this orientation.
Given a particular axis expressed in the laboratory coordinate system,
one can compute the so called IPF colour based on that direction
expressed in the crystal coordinate system as :math:`[x_c,y_c,z_c]`.
There is only one tuple (u,v,w) such that:
.. math::
[x_c,y_c,z_c]=u.[0,0,1]+v.[0,1,1]+w.[1,1,1]
and it is used to assign the RGB colour.
:param ndarray axis: the direction to use to compute the IPF colour.
:param Symmetry symmetry: the symmetry operator to use.
        :return tuple: a tuple containing the RGB values.
"""
axis /= np.linalg.norm(axis)
# find the axis lying in the fundamental zone
for sym in symmetry.symmetry_operators():
Osym = np.dot(sym, self.orientation_matrix())
            Vc = np.dot(Osym, axis)
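            # Truncated in the source. A sketch completing the docstring's
            # recipe above; the upward flip and the fundamental-zone test are
            # assumptions, not the original lines.
            if Vc[2] < 0:
                Vc *= -1.  # use the upward-pointing equivalent
            # solve [x_c, y_c, z_c] = u*[0,0,1] + v*[0,1,1] + w*[1,1,1]
            uvw = np.array([Vc[2] - Vc[1], Vc[1] - Vc[0], Vc[0]])
            if (uvw >= 0.).all():  # axis lies in the fundamental zone
                uvw /= uvw.max()
                return tuple(uvw)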
# -*- coding: utf-8 -*-
"""
Package for dealing with alignment methods between two AmpObject meshes
Copyright: <NAME> 2020, <EMAIL>
"""
import numpy as np
import copy
import vtk
import math
from scipy import spatial
from scipy.optimize import minimize
from ampscan.core import AmpObject
from ampscan.vis import vtkRenWin
from ampscan.analyse import create_slices, est_volume, calc_csa
# For doc examples
import os
staticfh = os.path.join(os.getcwd(), "tests", "stl_file.stl")
movingfh = os.path.join(os.getcwd(), "tests", "stl_file_2.stl")
class align(object):
r"""
Automated alignment methods between two meshes
Parameters
----------
moving: AmpObject
The moving AmpObject that is to be aligned to the static object
static: AmpObject
        The static AmpObject that the moving AmpObject will be aligned to
method: str, default 'linPoint2Plane'
A string of the method used for alignment
*args:
The arguments used for the alignment methods
**kwargs:
The keyword arguments used for the alignment methods
Returns
-------
m: AmpObject
        The aligned AmpObject; it has the same number of vertices and the
        same face array as the moving AmpObject
Access this using align.m
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static).m
"""
def __init__(self, moving, static, method = 'linPoint2Plane',
inverse=False, *args, **kwargs):
mData = dict(zip(['vert', 'faces', 'values'],
[moving.vert, moving.faces, moving.values]))
alData = copy.deepcopy(mData)
self.m = AmpObject(alData, stype='reg')
self.s = static
if inverse:
self.inverse(method=method, *args, **kwargs)
else:
self.runICP(method=method, *args, **kwargs)
def runICP(self, method = 'linPoint2Plane', maxiter=20, inlier=1.0,
initTransform=None, *args, **kwargs):
r"""
The function to run the ICP algorithm, this function calls one of
multiple methods to calculate the affine transformation
Parameters
----------
method: str, default 'linPoint2Plane'
A string of the method used for alignment
maxiter: int, default 20
Maximum number of iterations to run the ICP algorithm
inlier: float, default 1.0
The proportion of closest points to use to calculate the
transformation, if < 1 then vertices with highest error are
discounted
*args:
The arguments used for the alignment methods
**kwargs:
The keyword arguments used for the alignment methods
"""
# Define the rotation, translation, error and quaterion arrays
Rs = np.zeros([3, 3, maxiter+1])
Ts = np.zeros([3, maxiter+1])
err = np.zeros([maxiter+1])
if initTransform is None:
initTransform = np.eye(4)
Rs[:, :, 0] = initTransform[:3, :3]
Ts[:, 0] = initTransform[3, :3]
fC = self.s.vert[self.s.faces].mean(axis=1)
kdTree = spatial.cKDTree(fC)
self.m.rigidTransform(Rs[:, :, 0], Ts[:, 0])
inlier = math.ceil(self.m.vert.shape[0]*inlier)
[dist, idx] = kdTree.query(self.m.vert, 1)
# Sort by distance
        sort = np.argsort(dist)
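        # Truncated in the source. A hedged sketch of the iteration loop the
        # docstring describes; the transform-method dispatch via getattr is an
        # assumption, not the original implementation.
        [dist, idx] = dist[sort][:inlier], idx[sort][:inlier]
        for i in range(maxiter):
            # Estimate the rigid transform from the current correspondences
            R, T = getattr(align, method)(self.m.vert[sort][:inlier],
                                          fC[idx], *args, **kwargs)
            self.m.rigidTransform(R, T)
            # Accumulate the transform, then re-establish correspondences
            Rs[:, :, i + 1] = np.dot(R, Rs[:, :, i])
            Ts[:, i + 1] = np.dot(R, Ts[:, i]) + T
            [dist, idx] = kdTree.query(self.m.vert, 1)
            sort = np.argsort(dist)
            [dist, idx] = dist[sort][:inlier], idx[sort][:inlier]
            err[i + 1] = math.sqrt(np.mean(dist ** 2))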
"""
This is the implementation of the AYS Environment in the form
that it can used within the Agent-Environment interface
in combination with the DRL-agent.
@author: <NAME>
"""
import sys
import numpy as np
from scipy.integrate import odeint
import AYS.ays_model as ays
import AYS.ays_general as ays_general
from DeepReinforcementLearning.Basins import Basins
from gym import Env
import mpl_toolkits.mplot3d as plt3d
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.offsetbox import AnchoredText
from plots.AYS_3D_figures import create_figure
import plots.AYS_3D_figures as ays_plot
import os
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
@np.vectorize
def inv_compactification(y, x_mid):
if y == 0:
return 0.
if np.allclose(y, 1):
return np.infty
return x_mid * y / (1 - y)
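# For reference, the inverse of the map above is y = x / (x + x_mid), which
# carries [0, inf) onto [0, 1); the class's _compactification method is
# assumed to implement this forward direction.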
from inspect import currentframe, getframeinfo
def get_linenumber():
print_debug_info()
print("Line: ")
cf = currentframe()
return cf.f_back.f_lineno
def print_debug_info():
frameinfo = getframeinfo(currentframe())
print ("File: ", frameinfo.filename)
class AYS_Environment(Env):
"""
The environment is based on Kittel et al. 2017, and contains in part code adapted from
https://github.com/timkittel/ays-model/ .
This Environment describes the 3D implementation of a simple model for the development of climate change, wealth
and energy transformation which is inspired by the model from Kellie-Smith and Cox.
Dynamic variables are :
- excess atmospheric carbon stock A
- the economic output/production Y (similar to wealth)
- the renewable energy knowledge stock S
Parameters
----------
- sim_time: Timestep that will be integrated in this simulation step
In each grid point the agent can choose between subsidy None, A, B or A and B in combination.
"""
dimensions=np.array( ['A','Y','S'] )
management_options=['default', 'LG' , 'ET','LG+ET' ]
action_space=[(False, False), (True, False), (False, True), (True, True)]
action_space_number=np.arange(len(action_space))
# AYS example from Kittel et al. 2017:
tau_A = 50
tau_S = 50
beta = 0.03
beta_LG = 0.015
eps = 147
A_offset = 600
theta = beta /(950-A_offset) # beta / ( 950 - A_offset(=350) )
#theta = 8.57e-5
rho = 2.
sigma = 4e12
sigma_ET = sigma*0.5**(1/rho)
#sigma_ET = 2.83e12
phi = 4.7e10
AYS0 = [240, 7e13, 5e11]
possible_test_cases=[[0.4949063922255394, 0.4859623171738628, 0.5] , [0.42610779 ,0.52056811, 0.5]]
def __init__(self, t0=0, dt=1 , reward_type='PB', image_dir='./images/', run_number=0, plot_progress=False):
self.image_dir=image_dir
self.run_number = run_number
self.plot_progress=plot_progress
# The grid defines the number of cells, hence we have 8x8 possible states
self.final_state=False
self.reward=0
self.reward_type=reward_type
self.reward_function=self.get_reward_function(reward_type)
timeStart = 0
intSteps = 10 # integration Steps
self.t=self.t0=t0
self.dt=dt
self.sim_time_step=np.linspace(timeStart,dt, intSteps)
self.green_fp=[0,1,1]
self.brown_fp=[0.6,0.4,0]
self.final_radius=0.05 # Attention depending on how large the radius is, the BROWN_FP can be reached!
self.color_list=ays_plot.color_list
self.X_MID= [240, 7e13, 5e11]
# Definitions from outside
self.current_state=[0.5, 0.5, 0.5]
self.state=self.start_state=self.current_state
self.observation_space=self.state
"""
This values define the planetary boundaries of the AYS model
"""
self.A_PB = self._compactification( ays.boundary_parameters["A_PB"] , self.X_MID[0]) # Planetary boundary
self.Y_SF = self._compactification( ays.boundary_parameters["W_SF"] , self.X_MID[1]) # Social foundations as boundary
self.S_LIMIT=0
self.PB=[self.A_PB, self.Y_SF,0]
print("Init AYS Environment!",
"\nReward Type: " + reward_type,
"\nSustainability Boundaries [A_PB, Y_SF, S_ren]: ", inv_compactification(self.PB, self.X_MID) )
def step(self, action):
"""
This function performs one simulation step in a RFL algorithm.
It updates the state and returns a reward according to the chosen reward-function.
"""
next_t= self.t + self.dt
self.state=self._perform_step(action, next_t)
self.t=next_t
if self._arrived_at_final_state():
self.final_state = True
reward=self.reward_function(action) # TODO check if this might be needed before step is done to evaluate the current state, not the next state!
if not self._inside_planetary_boundaries():
self.final_state = True
#print("Left planetary boundaries!" + str(self.state))
reward=0
return self.state, reward, self.final_state
def _perform_step(self, action, next_t):
parameter_list=self._get_parameters(action)
traj_one_step=odeint(ays.AYS_rescaled_rhs, self.state, [self.t, next_t] , args=parameter_list[0], mxstep=50000)
a = traj_one_step[:,0][-1]
y = traj_one_step[:,1][-1]
s = traj_one_step[:,2][-1]
return np.array( (a,y,s) )
def reset(self):
#self.state=np.array(self.random_StartPoint())
self.state=np.array(self.current_state_region_StartPoint())
#self.state=np.array(self.current_state)
self.final_state=False
self.t=self.t0
return self.state
def reset_for_state(self, state=None):
        if state is None:
            self.start_state = self.state = np.array(self.current_state)
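        # Truncated in the source; the completion below mirrors reset() above
        # and is an assumption.
        else:
            self.start_state = self.state = np.array(state)
        self.final_state = False
        self.t = self.t0
        return self.state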
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 4 15:23:46 2018
@author: Jared
"""
from collections import Counter
import pymongo
import pandas as pd
from ast import literal_eval
from ml.elements import *
#import machineLearner as ml #get rid of if doing oqmd database
#from qmpy import * #use python 2.7!
from matplotlib import pyplot as plt
import math
#import mysql.connector
import numpy as np
from pandas.plotting import scatter_matrix
import matplotlib.patches as mpatches
import matplotlib
import matplotlib.gridspec as gridspec
# ENERGY OF FORMATION
# dH = totalEnergy - sum(i,x){x*E_i}, x number of atoms of that type
# STABILITY (E_HULL)
# dH_stab = dH - dH_hull
# dH_hull (ev/atom), but calculated a different way than our energy of formation
# We need
# Access Syntax for direct acces to DB
'''
cnx = mysql.connector.connect(user='root', password='<PASSWORD>',
host='127.0.0.1',
database='qmpy_jared')
cursor = cnx.cursor()
cursor.execute("USE qmpy_jared;")
cursor.close()
cnx.close()
'''
# DEFINITIONS FOR OQMD DATA
'''
space = 'Cs-Sn-Br'
comp = 'CsSnBr3'
space = PhaseSpace(space)
energy, phase = space.gclp(comp)
compute_stability
print(energy, phase)
'''
def main():
matplotlib.rcParams.update({'font.size': 15.5})
# QUICK LOAD TO AVOID CALCULATION
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
deltaH_qmpy = pd.read_csv(path + 'compEnergy_qmdb_d3.csv')
print('qmpy ', len(deltaH_qmpy))
mng_client = pymongo.MongoClient('localhost', 27017)
db = mng_client['perovskites']
# GET AGGREGATED CRYSTAL DATA FROM MONGODB
df = pd.DataFrame(list(db['qw_outputs_aggregated'].find()))
#df = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/crystalDB3/aggregated_features_14092018.csv')
df_features = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/Data/featureDB2/d2_paper_24102018.csv')
'''
plt.ylabel('$E_{gap}$ (eV)')
plt.xlabel('Iodine Mixing Fraction')
plt.title('Iodine Bandgap Trend')
s = 'fracI'
s2 = 'dir_gap'
y_cl = df_features.groupby([s])[s2].mean()
x_cl = np.array([i for i in y_cl.index])
y_cl = y_cl.values
plt.scatter(df_features[s], df_features[s2], alpha = 0.2)
p1, = plt.plot(x_cl, y_cl, linestyle = '-', lw = 2, label = 'D$_{3}$')
ax1 = plt.axes()
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
#plt.legend(handles = [p1])
plt.tight_layout()
path = '/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/'
plt.savefig(path + 'dummyTrend_realI.png', dpi = 400, bbox_inches="tight")
plt.show()
'''
#df = df.dropna(axis = 0)
dff = df.drop(df[df['nIterations'] >= 201].index).copy()
dff = dff.drop(df[df['crystal_id'] == 1526850748].index).copy()
df = dff.drop(df[df['crystal_id'] == 1526752626].index).copy()
print('here', len(df))
#deltaH_qmdb = getCrystalOQMDData(df)
# MY CALCULATED FORMATION ENERGY
mu = getMuCorrectedDFT2()
deltaH2_formation = getDeltaH_formation(df, mu)
mu = getMuDFT()
deltaH_formation = getDeltaH_formation(df, mu)
#df_delta = pd.DataFrame(deltaH_formation, columns = 'dH_formation')
#deltaH_formation.to_csv('/Users/Jared/Dropbox/Master Thesis/code/codeOutputs/df_formation.csv')
#plotDeltaH_formation(list(deltaH_formation['deltaH_formation']))
# GEOMETRIC FORMATION ENERGY (BASED ON FIT)
#deltaH_geo = getDeltaH_geo(df)
#deltaH_geo.to_csv('/Users/Jared/Dropbox/Master Thesis/' +
# 'code/codeOutputs/deltaH_geo.csv')
deltaH_geo = pd.read_csv('/Users/Jared/Dropbox/Master Thesis/' +
'code/codeOutputs/deltaH_geo.csv')
print('geo', len(deltaH_geo))
#plotDeltaH_geo(list(deltaH_geo['deltaH_geo']))
# comparison of geometric approach fidelity
'''
plt.plot(deltaH_geo['descriptor'], deltaH['deltaH'], 'o')
plt.xlabel('$(t + \mu)^{\eta}$')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Formation Energy vs. Geometric Factor')
plt.show()
'''
#error associated with SG15 basis set
#delta = ((10.78 + 8.19 + 7.69 + 0.19)*(4/20) +
# (4.35 + 8.07)*(4/20) +
# (1.9 + 6.03 + 5.53)*(8/20))
# MERGE ALL DATA
result = pd.merge(deltaH_formation, deltaH_qmpy, on=['crystal_id'])
result = pd.merge(result, deltaH_geo, on=['crystal_id'])
result= pd.merge(result, df_features, on=['crystal_id'])
result_corrected = pd.merge(deltaH2_formation, deltaH_qmpy, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, deltaH_geo, on=['crystal_id'])
result_corrected = pd.merge(result_corrected, df_features, on=['crystal_id'])
sresult = result_corrected
'''
result = result[result.crystal_id != 1519471915]
result = result[result.crystal_id != 1519608323]
result = result[result.crystal_id != 1519429441]
result = result[result.crystal_id != 1520265350]
result = result[result.crystal_id != 1520268226]
result = result[result.crystal_id != 1520334800]
result = result[result.crystal_id != 1520343157]
result = result[result.crystal_id != 1520349833]
result = result[result.crystal_id != 1520411007]
result = result[result.crystal_id != 1520429554]
result = result[result.crystal_id != 1520442584]
result = result[result.crystal_id != 1520483780]
'''
# big plot
my_dpi = 500
fig = plt.figure(figsize=(5, 5), dpi=my_dpi)
m = np.array((list(result['deltaH_formation'] - result['deltaH_hull'])))
m = m.mean()
    m = 0.150 # 100 meV line
ymin = 1.12*min(result['deltaH_hull']) if min(result['deltaH_hull']) <=0 else 0.88*min(result['deltaH_hull'])
ymax = 1.12*max(result['deltaH_hull']) if max(result['deltaH_hull']) >=0 else 0.88*max(result['deltaH_hull'])
xmax = ymax
plt.ylim(ymin, ymax)
plt.xlim(ymin, xmax)
xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
xy = [ymin, ymax]
p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
p0c, = plt.plot(result['deltaH_hull'],
result_corrected['deltaH_formation'], 'o',
alpha = 0.5, color = 'r', label = '$\mu_{corrected}$')
p0, = plt.plot(result['deltaH_hull'],
result['deltaH_formation'], 'o',
alpha = 0.5, label = '$\mu$')
#p1, = plt.plot(xy, xy, color = 'k', label = '$E_{hull}$')
#xy = [min(result['deltaH_hull']), max(result['deltaH_hull'])]
#p2, = plt.plot(xy, [i + m for i in xy], alpha = 1.0,
# color = 'k',
# label = '$\Delta E_{hull}$ = 100 meV',
# linestyle = '--', linewidth = 3.0)
plt.xlabel('$\Delta H_{f, OQMD}$ (eV/atom)')
plt.ylabel('$\Delta H_{f}$ (eV/atom)')
plt.title('Convex Hull Distance', y = 1.04)
plt.legend(handles = [p0c, p0, p1])
ax1 = plt.axes()
ax1.xaxis.set_major_locator(plt.MaxNLocator(6))
ax1.yaxis.set_major_locator(plt.MaxNLocator(6))
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'paper_oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
# hist plot
c, d, e = plt.hist(list(result['deltaH_formation'] - result['deltaH_hull']), bins = 21)
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
#plt.title('Stability of ' + str(len(result)) + ' Compounds')
#plt.xlabel('$E_{hull}$ distance (eV)')
#plt.ylabel('Count')
c, d, e = plt.hist(
list(result_corrected['deltaH_formation'] -
result['deltaH_hull']), bins = 21, color = 'r')
plt.setp(e, edgecolor='w', lw=1, alpha = 0.7)
plt.title('D$_{3}$ Hull Distance')
plt.xlabel('$\Delta E_{hull}$ (eV)')
plt.ylabel('Count')
ax1 = plt.axes()
ax1.tick_params(bottom = True, top = True, left = True, right = True,
direction = 'in')
plt.savefig(path + 'oqmdb_new1.png', dpi=400, bbox_inches="tight")
plt.show()
'''
#sresult = result_corrected.copy() #result_corrected[['fracCl','fracBr',
# 'fracI', 'fracCs',
#'fracRb', 'fracNa',
#'fracK', 'fracSn',
# 'fracGe', 'deltaH_hull']]
#plt.scatter(result['fracCl'], result['deltaH_hull'])
#print(sresult['t'])
print(len(sresult))
#
#
# lattice validity
t1 = 2*(sresult['lb'].values)/(sresult['la'].values)
t2 = 2*(sresult['lb'].values)/(sresult['lc'].values)
'''
blue_patch = mpatches.Patch(color='blue', label='2*lb/la')
red_patch = mpatches.Patch(color='red', label='2*lb/lc')
c2, d2, e2 = plt.hist(t1, bins = 21, color = 'b')
plt.setp(e2, edgecolor='w', lw=1, alpha = 0.7)
c1, d1, e1 = plt.hist(t2, bins = 21, color = 'r')
plt.setp(e1, edgecolor='w', lw=1, alpha = 0.7)
plt.legend(handles=[blue_patch, red_patch])
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Lattice Vector Ratio')
plt.ylabel('Count')
plt.show()
'''
sresult['hullDistance'] = list(result_corrected['deltaH_formation'] -
result_corrected['deltaH_hull'])
sresult['deltaH_formation'] = list(result_corrected['deltaH_formation'])
'''
#
#
# goldshmitd vs dhhull
plt.scatter(sresult['t'].values, sresult['hullDistance'].values)
plt.show()
#
#
# goldschmidt validity
#plt.hist(sresult['t'].values)
c1, d1, e1 = plt.hist(sresult['t'].values, bins = 21)
plt.setp(e1, edgecolor='w', lw=1)
plt.title('D$_{3}$ Perovskite Validity')
plt.xlabel('Goldschmidt Tolerance Factor')
plt.ylabel('Count')
plt.show()
'''
plt.ylabel('$\Delta E_{hull}$ (eV)')
plt.xlabel('Sodium Mixing Fraction')
plt.title('Sodium $\Delta E_{hull}$ Trend')
s = 'fracNa'
s2 = 'hullDistance'
y_cl = sresult.groupby([s])[s2].mean()
    x_cl = np.array([i for i in y_cl.index])
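    # Truncated in the source. The natural continuation, mirroring the fracI
    # trend block earlier in this file (an assumption):
    y_cl = y_cl.values
    plt.scatter(sresult[s], sresult[s2], alpha = 0.2)
    plt.plot(x_cl, y_cl, linestyle = '-', lw = 2)
    plt.tight_layout()
    plt.show()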
# 4.3.2 Inference for a Poisson mixture model: Gibbs sampling
#%%
# Libraries used in Section 4.3.2
import numpy as np
from scipy.stats import poisson, gamma # Poisson distribution, gamma distribution
import matplotlib.pyplot as plt
#%%
## Set up the observation model (Poisson mixture distribution)
# Specify the true parameters
lambda_truth_k = np.array([10, 25, 40])
# Specify the true mixture weights
pi_truth_k = np.array([0.35, 0.25, 0.4])
# Get the number of clusters
K = len(lambda_truth_k)
# Create the x points used for plotting
x_line = np.arange(0, 2 * np.max(lambda_truth_k))
print(x_line)
# Compute the observation model
model_prob = 0.0
for k in range(K):
    # Compute the probabilities of cluster k's distribution
    tmp_prob = poisson.pmf(k=x_line, mu=lambda_truth_k[k])
    # Accumulate the weighted average over the K distributions
    model_prob += tmp_prob * pi_truth_k[k]
#%%
# Plot the observation model
plt.figure(figsize=(12, 9))
plt.bar(x=x_line, height=model_prob) # true distribution
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model', size = 20)
plt.title('$\lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k])+ ']$', loc='left')
plt.show()
#%%
## Generate the observed data
# Specify the number of (observed) data points
N = 250
# Generate the true cluster assignments
s_truth_nk = np.random.multinomial(n=1, pvals=pi_truth_k, size=N)
# Extract the true cluster indices
_, s_truth_n = np.where(s_truth_nk == 1)
# Generate the (observed) data
#x_n = np.random.poisson(lam=np.prod(lambda_truth_k**s_truth_nk, axis=1), size=N)
x_n = np.random.poisson(lam=lambda_truth_k[s_truth_n], size=N)
print(x_n[:10])
#%%
# Histogram of the observed data
plt.figure(figsize=(12, 9))
plt.bar(x=x_line, height=model_prob, label='true model',
        color='white', alpha=1, edgecolor='red', linestyle='--') # true distribution
plt.bar(x=x_line, height=[np.sum(x_n == x) / len(x_n) for x in x_line], label='observation data') # observed data
plt.xlabel('x')
plt.ylabel('dens')
plt.suptitle('Poisson Mixture Model', size=20)
plt.title('$N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
# Histogram of the true clusters
plt.figure(figsize=(12, 9))
for k in range(K):
    plt.bar(x=x_line, height=[np.sum(x_n[s_truth_n == k] == x) for x in x_line],
            alpha=0.5, label='cluster:' + str(k + 1)) # true clusters
plt.xlabel('x')
plt.ylabel('count')
plt.suptitle('Poisson Mixture Model', size=20)
plt.title('$N=' + str(N) +
', \lambda=[' + ', '.join([str(lmd) for lmd in lambda_truth_k]) + ']' +
', \pi=[' + ', '.join([str(pi) for pi in pi_truth_k]) + ']$', loc='left')
plt.legend()
plt.show()
#%%
## Set up the prior distributions (gamma and Dirichlet)
# Specify the parameters of the prior on lambda
a = 1.0
b = 1.0
# Specify the parameters of the prior on pi
alpha_k = np.repeat(2.0, K)
#%%
## Set the initial values
# Draw an initial lambda
lambda_k = np.random.gamma(shape=a, scale=1 / b, size=K)
print(lambda_k)
# Draw an initial pi
pi_k = np.random.dirichlet(alpha=alpha_k, size=1).reshape(K)
print(pi_k)
#%%
# Compute the mixture distribution under the initial values
init_prob = 0.0
for k in range(K):
    # Compute the probabilities of cluster k's distribution
    tmp_prob = poisson.pmf(k=x_line, mu=lambda_k[k])
    # Accumulate the weighted average over the K distributions
    init_prob += tmp_prob * pi_k[k]
# Plot the distribution under the initial values
plt.figure(figsize=(12, 9))
plt.bar(x_line, init_prob) # distribution under the initial values
plt.xlabel('x')
plt.ylabel('prob')
plt.suptitle('Poisson Mixture Model', size = 20)
plt.title('$iter:' + str(0) +
', \lambda=[' + ', '.join([str(lmd) for lmd in np.round(lambda_k, 2)]) + ']' +
          ', \pi=[' + ', '.join([str(pi) for pi in np.round(pi_k, 2)]) + ']$', loc='left')
plt.show()
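#%%
## Gibbs sampling (a hedged sketch of the updates this section builds toward;
## MaxIter is an illustrative choice, and the conditionals follow the standard
## Poisson-mixture derivation rather than the truncated original code)
MaxIter = 100
s_nk = np.zeros((N, K))
for _ in range(MaxIter):
    # Sample the cluster assignments s_n from their categorical conditionals
    log_eta_nk = (x_n.reshape(N, 1) * np.log(lambda_k)
                  - lambda_k + np.log(pi_k))
    eta_nk = np.exp(log_eta_nk - log_eta_nk.max(axis=1, keepdims=True))
    eta_nk /= eta_nk.sum(axis=1, keepdims=True)
    for n in range(N):
        s_nk[n] = np.random.multinomial(n=1, pvals=eta_nk[n])
    # Sample lambda_k from its gamma conditional
    a_hat_k = np.dot(s_nk.T, x_n) + a
    b_hat_k = np.sum(s_nk, axis=0) + b
    lambda_k = np.random.gamma(shape=a_hat_k, scale=1 / b_hat_k)
    # Sample pi from its Dirichlet conditional
    alpha_hat_k = np.sum(s_nk, axis=0) + alpha_k
    pi_k = np.random.dirichlet(alpha=alpha_hat_k)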
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "plotly_white"
pio.renderers.default = "browser"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
expectation, variance = 10, 1
sample_size = 1000
    samples = np.random.normal(expectation, variance, sample_size)  # note: the scale arg is the std dev (equal to the variance here since both are 1)
uni_gaussian_fitter = UnivariateGaussian().fit(samples)
print("(%f, %f)" % (uni_gaussian_fitter.mu_, uni_gaussian_fitter.var_))
# Question 2 - Empirically showing sample mean is consistent
sample_sizes = np.arange(10, sample_size+1, 10)
error_distances = np.empty(sample_sizes.size)
for i in range(sample_sizes.size):
part_of_sample = sample_sizes[i]
uni_gaussian_fitter.fit(samples[0:part_of_sample+1])
error_distances[i] = np.abs(uni_gaussian_fitter.mu_ - expectation)
fig = go.Figure(data=go.Scatter(x=sample_sizes, y=error_distances))
fig.update_layout(title="The absolute distance between the estimated- and true value of the expectation")
fig.update_xaxes(title_text="Sample Size",)
fig.update_yaxes(title_text="Distance between the estimated and true expectation")
fig.show()
# Question 3 - Plotting Empirical PDF of fitted model
sample_pdf = uni_gaussian_fitter.pdf(samples)
pdf_fig = go.Figure(data=go.Scatter(x=samples, y=sample_pdf, mode='markers'))
pdf_fig.update_layout(title="The empirical PDF function of each sample")
pdf_fig.update_xaxes(title_text="Sample Value", )
pdf_fig.update_yaxes(title_text="Fitted PDF Value of the Sample")
pdf_fig.show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
mean = np.array([0, 0, 4, 0]).T
cov = np.array([
[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]
])
sample_size = 1000
    samples = np.random.multivariate_normal(mean, cov, sample_size)
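    # Truncated in the source. The remaining steps, mirroring the univariate
    # test above (an assumption): fit the estimator and print its parameters.
    multi_gaussian_fitter = MultivariateGaussian().fit(samples)
    print(multi_gaussian_fitter.mu_)
    print(multi_gaussian_fitter.cov_)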