from hsi_toolkit.util import img_det
from hsi_toolkit.dev.dim_reduction import mnf
import numpy as np
def mtmf_statistic(hsi_img,tgt_sig, mask = None):
"""
Mixture Tuned Matched Filter Infeasibility Statistic
Inputs:
hsi_image - n_row x n_col x n_band hyperspectral image
tgt_sig - target signature (n_band x 1 - column vector)
mask - binary image limiting detector operation to pixels where mask is true
if not present or empty, no mask restrictions are used
Outputs:
mtmf_out - MTMF infeasibility statistic
alpha - matched filter output
8/12/2012 - <NAME> - <EMAIL>
12/2018 - Python Implementation by Yutai Zhou
"""
if tgt_sig.ndim == 1:
tgt_sig = tgt_sig[:, np.newaxis]
    mnf_img, n_dim, mnf_vecs, mnf_eigvals, mnf_mu = mnf(hsi_img, 1)
# tgt_sig = tgt_sig[:n_dim,0][:,np.newaxis]
s = mnf_vecs @ (tgt_sig - mnf_mu)
mtmf_out, kwargsout = img_det(mtmf_helper, mnf_img, s, mnf_eigvals = mnf_eigvals)
return mtmf_out, kwargsout['alpha']
def mtmf_helper(hsi_data, tgt_sig, kwargs):
mnf_eigvals = kwargs['mnf_eigvals']
n_band, n_pixel = hsi_data.shape
z = hsi_data
s = tgt_sig
sts = s.T @ s
alpha = np.zeros(n_pixel)
mtmf_data = np.zeros(n_pixel)
    ev = np.sqrt(mnf_eigvals)
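    # The helper is truncated in the source; the loop below is a hedged sketch
    # of a standard MTMF computation (per-pixel matched filter score plus an
    # infeasibility distance in MNF space), not the verbatim hsi_toolkit code.
    one = np.ones(n_band)
    for i in range(n_pixel):
        # matched filter score for pixel i
        a = float(s.T @ z[:, i] / sts)
        alpha[i] = a
        # clamp alpha to [0, 1] for the eigenvalue interpolation
        a_c = min(max(a, 0.0), 1.0)
        # interpolated standard deviations: background spread (ev) at alpha = 0,
        # unit spread at the target point alpha = 1
        sigma = a_c * one + (1 - a_c) * ev
        # infeasibility: distance of the pixel from the target/background
        # mixing line, in units of the interpolated standard deviations
        mtmf_data[i] = np.sqrt(np.sum((z[:, i] / sigma) ** 2))
    return mtmf_data, {'alpha': alpha}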
"""
********************************************************************************
2D Robot Localization - Benchmark
********************************************************************************
Goals of this script:
- implement different UKFs on the 2D robot localization example.
- design the Extended Kalman Filter (EKF) and the Invariant Extended Kalman
Filter (IEKF) :cite:`barrauInvariant2017`.
- compare the different algorithms with Monte-Carlo simulations.
*We assume the reader is already familiar with the considered problem described
in the tutorial.*
We previously designed a UKF with a standard uncertainty representation. An
advantage of the UKF's versatility is that it speeds up the implementation,
testing, and comparison of algorithms with different uncertainty
representations.
Indeed, for the given problem, three different UKFs emerge, defined respectively
as:
1) The state is embedded in :math:`SO(2) \\times \mathbb{R}^2`, where:
   - the retraction :math:`\\varphi(.,.)` is the :math:`SO(2)` exponential
     for orientation and the vector addition for position.
   - the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SO(2)`
     logarithm for orientation and the vector subtraction for position.
2) The state is embedded in :math:`SE(2)` with left multiplication, i.e.
- the retraction :math:`\\varphi(.,.)` is the :math:`SE(2)` exponential,
where the state multiplies on the left the uncertainty
:math:`\\boldsymbol{\\xi}`.
- the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SE(2)`
logarithm.
- this left UKF on :math:`SE(2)` corresponds to the Invariant Extended Kalman
Filter (IEKF) recommended in :cite:`barrauInvariant2017`.
3) The state is embedded in :math:`SE(2)` with right multiplication, i.e.
- the retraction :math:`\\varphi(.,.)` is the :math:`SE(2)` exponential,
where the state multiplies on the right the uncertainty
:math:`\\boldsymbol{\\xi}`.
- the inverse retraction :math:`\\varphi^{-1}(.,.)` is the :math:`SE(2)`
logarithm.
We test the filters in simulation with a strong initial heading error.
"""
################################################################################
# Import
# ==============================================================================
from ukfm import SO2, UKF, EKF
from ukfm import LOCALIZATION as MODEL
import ukfm
import numpy as np
import matplotlib
ukfm.utils.set_matplotlib_config()
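################################################################################
# As a hedged illustration (not part of the original benchmark), the three
# retractions listed above can be written directly with NumPy. The tuple state
# layout ``(Rot, p)`` and the helper names below are assumptions of this
# sketch; the actual model methods live in ``ukfm.LOCALIZATION``.
def _so2_exp(theta):
    # SO(2) exponential: 2 x 2 rotation matrix from a scalar angle
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])

def _se2_exp(xi):
    # SE(2) exponential of xi = (theta, u1, u2)
    theta = xi[0]
    if np.abs(theta) < 1e-9:
        V = np.eye(2)
    else:
        V = np.array([[np.sin(theta), -(1 - np.cos(theta))],
                      [1 - np.cos(theta), np.sin(theta)]]) / theta
    return _so2_exp(theta), V @ xi[1:3]

def _phi_so2_r2(Rot, p, xi):
    # retraction 1): SO(2) exponential on the heading, vector addition on position
    return Rot @ _so2_exp(xi[0]), p + xi[1:3]

def _phi_left_se2(Rot, p, xi):
    # retraction 2): the state multiplies the uncertainty exp(xi) on the left
    dRot, dp = _se2_exp(xi)
    return Rot @ dRot, Rot @ dp + p

def _phi_right_se2(Rot, p, xi):
    # retraction 3): the state multiplies the uncertainty exp(xi) on the right
    dRot, dp = _se2_exp(xi)
    return dRot @ Rot, dRot @ p + dp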
################################################################################
# We compare the filters on a large number of Monte-Carlo runs.
# Monte-Carlo runs
N_mc = 100
################################################################################
# Simulation Setting
# ==============================================================================
# We set the simulation as in :cite:`barrauInvariant2017`, section IV. The robot
# drives along a 10 m diameter circle for 40 seconds with high rate odometer
# measurements (100 Hz) and low rate GPS measurements (1 Hz). The vehicle has
# moderate angular velocity uncertainty and highly precise linear velocity
# measurements. The initial heading error is very large, **45° standard
# deviation**, while the initial position is perfectly known.
# sequence time (s)
T = 40
# odometry frequency (Hz)
odo_freq = 100
# create the model
model = MODEL(T, odo_freq)
# odometry noise standard deviation
odo_std = np.array([0.01,  # longitudinal speed (m/s)
                    0.01,  # lateral speed (m/s)
                    1 / 180 * np.pi])  # angular speed (rad/s)
# GPS frequency (Hz)
gps_freq = 1
# GPS noise standard deviation (m)
gps_std = 1
# radius of the circle trajectory (m)
radius = 5
# initial heading error standard deviation
theta0_std = 45/180*np.pi
################################################################################
# Filter Design
# ==============================================================================
# The UKFs are compared to an Extended Kalman Filter (EKF) and an Invariant EKF
# (IEKF). The EKF has the same uncertainty representation as the UKF with the
# retraction on :math:`SO(2) \times \mathbb{R}^2`, whereas the IEKF has the same
# uncertainty representation as the UKF with the left retraction on
# :math:`SE(2)`.
# propagation noise covariance matrix
Q = np.diag(odo_std**2)
# measurement noise covariance matrix
R = gps_std**2*np.eye(2)
# initial covariance matrix
P0 = np.zeros((3, 3))
# we take into account initial heading error
P0[0, 0] = theta0_std ** 2
# sigma point parameter
alpha = np.array([1e-3, 1e-3, 1e-3])
################################################################################
# We preallocate the error variables before launching the Monte-Carlo
# simulations. As we compare five similar methods, the code is somewhat
# repetitive.
ukf_err = np.zeros((N_mc, model.N, 3))
left_ukf_err = np.zeros_like(ukf_err)
right_ukf_err = np.zeros_like(ukf_err)
iekf_err = np.zeros_like(ukf_err)
ekf_err = np.zeros_like(ukf_err)
################################################################################
# We record Normalized Estimation Error Squared (NEES) for consistency
# evaluation (see Results).
ukf_nees = np.zeros((N_mc, model.N, 2))
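################################################################################
# By analogy with the error arrays above, NEES buffers for the remaining
# filters (a plausible completion; the original script is truncated here).
left_ukf_nees = np.zeros_like(ukf_nees)
right_ukf_nees = np.zeros_like(ukf_nees)
iekf_nees = np.zeros_like(ukf_nees)
ekf_nees = np.zeros_like(ukf_nees)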
# -*- coding: utf-8 -*-
"""
###############################################################################
#
# autoPACK Authors: <NAME>, <NAME>, <NAME>, <NAME>
# Based on COFFEE Script developed by <NAME> between 2005 and 2010
# with assistance from <NAME> in 2009 and periodic input
# from Arthur Olson's Molecular Graphics Lab
#
# BaseGrid.py Authors: <NAME> & <NAME> with editing/enhancement from Ludovic Autin
#
# Translation to Python initiated March 1, 2010 by <NAME> with <NAME>
#
# Class restructuring and organization: Michel Sanner
#
# Copyright: <NAME> ©2010
#
# This file "BaseGrid.py" is part of autoPACK, cellPACK.
#
# autoPACK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autoPACK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autoPACK (see "CopyingGNUGPL" in the installation directory).
# If not, see <http://www.gnu.org/licenses/>.
#
#
###############################################################################
@author: <NAME>, <NAME>, & <NAME>
"""
import logging
import numpy
from scipy import spatial
import math
from math import ceil, floor
from random import randrange
import cellpack.autopack as autopack
from cellpack.autopack.ldSequence import cHaltonSequence3
from cellpack.mgl_tools.RAPID import RAPIDlib
from cellpack.mgl_tools.bhtree import bhtreelib
# Kevin Grid point class
class gridPoint:
def __init__(self, i, globalC, isPolyhedron):
self.index = int(i)
self.isOutside = None
self.minDistance = (
99999 # Only store a number here if within certain distance from polyhedron
)
self.representsPolyhedron = isPolyhedron
self.closeFaces = []
self.closestFaceIndex = 0
self.testedEndpoint = None
self.allDistances = (
[]
) # Stores a tuple list of distances to all points. (point,distance) = (5,2.5)
self.globalCoord = numpy.array(
globalC
) # Stores the global coordinate associated with this point
class BaseGrid:
"""
The Grid class
==========================
    This class handles the use of a grid to control the packing. The grid keeps
    information on 3d positions, distances, freePoints and inside/surface points
    from organelles.
    NOTE: this class could be completely replaced if OpenVDB were wrapped for Python.
"""
@staticmethod
def reorder_free_points(pt, freePoints, nbFreePoints):
        # TODO: move this to env class; an ingredient shouldn't be aware of the whole grid
        # Swap the newly-inside point value with the value of the last free point.
        # The point will no longer be considered "free" because it will be beyond
        # the range of nbFreePoints. The value of the point itself records its
        # original index, so any future swaps will still move the correct index
        # into the range of nbFreePoints.
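        # Worked example (hypothetical values): with freePoints = [0, 1, 2, 3]
        # and nbFreePoints = 4, marking point 1 as used swaps it with the last
        # free slot, giving freePoints = [0, 3, 2, 1] and nbFreePoints = 3, so
        # freePoints[:3] still lists exactly the free points.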
nbFreePoints -= 1
vKill = freePoints[pt]
vLastFree = freePoints[nbFreePoints]
freePoints[vKill] = vLastFree
freePoints[vLastFree] = vKill
# Turn on these printlines if there is a problem with incorrect points showing in display points
# self.log.debug("*************pt = masterGridPointValue = %d", pt)
# self.log.debug("nbFreePointAfter = %d", nbFreePoints)
# self.log.debug("vKill = %d", vKill)
# self.log.debug("vLastFree = %d", vLastFree)
# self.log.debug("freePoints[vKill] = %d", freePoints[vKill])
# self.log.debug("freePoints[vLastFree] = %d", freePoints[vLastFree])
# self.log.debug("pt = masterGridPointValue = %d", pt)
# self.log.debug("freePoints[nbFreePoints-1] = %d", freePoints[nbFreePoints])
# self.log.debug("freePoints[pt] = %d", freePoints[pt])
        # freePoints will now have all the available indices between 0 and
        # nbFreePoints; freePoints[nbFreePoints:] won't necessarily be the
        # indices of inside points
return freePoints, nbFreePoints
@staticmethod
def updateDistances(
insidePoints,
newDistPoints,
freePoints,
nbFreePoints,
distance,
):
# self.log.info(
# "*************updating Distances %d %d", nbFreePoints, len(insidePoints)
# )
        # TODO: move this to env class; an ingredient shouldn't be aware of the whole grid
# t1 = time()
# distChanges = {}
for pt, dist in list(insidePoints.items()):
try:
freePoints, nbFreePoints = BaseGrid.reorder_free_points(
pt, freePoints, nbFreePoints
)
except Exception:
                print(pt, "not in freePoints ********************************")
pass
distance[pt] = dist
# self.log.debug("update free points loop %d", time() - t1)
# t2 = time()
for pt, dist in list(newDistPoints.items()):
if pt not in insidePoints:
distance[pt] = dist
# self.log.debug("update distance loop %d", time() - t2)
return nbFreePoints
def __init__(
self, boundingBox=([0, 0, 0], [0.1, 0.1, 0.1]), space=1, setup=True, lookup=0
):
self.log = logging.getLogger("grid")
self.log.propagate = False
        # a grid is attached to an environment
self.boundingBox = boundingBox
        # this list provides the id of the compartment each grid point belongs
        # to. The id is an integer where 0 is the Histological Volume, +i is
        # the surface of compartment i and -i is the interior of compartment i
        # in the list self.compartments
self.gridPtId = []
# will be a list of indices into 3D of compartment
# of points that have not yet been used by the fill algorithm
# entries are removed from this list as grid points are used up
        # during the fill. This list is used to pick points randomly during
# the fill
self.freePoints = []
self.nbFreePoints = 0
# this list evolves in parallel with self.freePoints and provides
# the distance to the closest surface (either an already placed
        # object or a compartment surface - the latter NOT IMPLEMENTED)
self.distToClosestSurf = []
self.distToClosestSurf_store = []
self.diag = self.getDiagonal()
        self.gridSpacing = space  # * 1.1547 : cubic grid with a diagonal spacing equal to the smallest packing radius
self.nbGridPoints = None
self.nbSurfacePoints = 0
self.gridVolume = 0 # will be the total number of grid points
# list of (x,y,z) for each grid point (x index moving fastest)
self.masterGridPositions = []
self._x = None
self._y = None
self._z = None
        # these are specific to each compartment
self.aInteriorGrids = []
self.aSurfaceGrids = []
# bhtree
self.surfPtsBht = None
self.ijkPtIndice = []
self.filename = None # used for storing before fill so no need rebuild
self.result_filename = None # used after fill to store result
self.tree = None
self.tree_free = None
self.encapsulatingGrid = 0
self.lookup = lookup
self.center = None
self.backup = None
if setup:
self.setup(boundingBox, space)
# use np.roll to have periodic condition
# what about collision ?
def setup(self, boundingBox, space):
# TODO : verify the gridSpacing calculation / setup after reading the recipe
if space == 0:
space = 20
self.gridSpacing = space # * 1.1547
self.boundingBox = boundingBox
if self.lookup == 0:
self.create3DPointLookupCover()
elif self.lookup == 1:
self.create3DPointLookup()
elif self.lookup == 2:
self.create3DPointLookup_loop()
nx, ny, nz = self.nbGridPoints
self.ijkPtIndice = self.cartesian([range(nx), range(ny), range(nz)])
self.getDiagonal()
self.nbSurfacePoints = 0
self.log.info("SETUP BASE GRID %d %d", self.gridVolume, self.gridSpacing)
self.gridPtId = numpy.zeros(self.gridVolume, "i") # [0]*nbPoints
# self.distToClosestSurf = [self.diag]*self.gridVolume#surface point too?
self.distToClosestSurf = (
numpy.ones(self.gridVolume) * self.diag
) # (self.distToClosestSurf)
self.freePoints = list(range(self.gridVolume))
self.nbFreePoints = len(self.freePoints)
self.log.info(
"Lookup: %d, bounding box: %r, gridSpacing %r, length gridPtId %r",
self.lookup,
boundingBox,
self.gridSpacing,
len(self.gridPtId),
)
self.setupBoundaryPeriodicity()
return self.gridSpacing
def reset(
self,
):
# reset the distToClosestSurf and the freePoints
# boundingBox should be the same otherwise why keeping the grid
# self.gridPtId = numpy.zeros(self.gridVolume,'i')
# self.distToClosestSurf = numpy.ones(self.gridVolume)*self.diag#(self.distToClosestSurf)
self.log.info("reset Grid distance to closest surface and freePoints")
self.distToClosestSurf = (
numpy.array(self.distToClosestSurf[:]) * 0.0
) + self.diag
# self.distToClosestSurf[:] = self.diag # numpy.array([self.diag]*len(self.distToClosestSurf))#surface point too?
self.freePoints = list(range(len(self.freePoints)))
self.nbFreePoints = len(self.freePoints)
def removeFreePoint(self, pti):
        self.nbFreePoints -= 1
        tmp = self.freePoints[self.nbFreePoints]  # last free point
        self.freePoints[self.nbFreePoints] = pti
        self.freePoints[pti] = tmp
def getDiagonal(self, boundingBox=None):
if boundingBox is None:
boundingBox = self.boundingBox
self.diag = numpy.sqrt(
(numpy.array(boundingBox[0]) - numpy.array(boundingBox[1])) ** 2
).sum()
return self.diag
def create3DPointLookup_loop(self, boundingBox=None):
"""
Fill the orthogonal bounding box described by two global corners
        with an array of points spaced gridSpacing apart.
"""
if boundingBox is None:
boundingBox = self.boundingBox
xl, yl, zl = boundingBox[0]
self.gridVolume, self.nbGridPoints = self.computeGridNumberOfPoint(
boundingBox, self.gridSpacing
)
nx, ny, nz = self.nbGridPoints
pointArrayRaw = numpy.zeros((nx * ny * nz, 3), "f")
self.ijkPtIndice = numpy.zeros((nx * ny * nz, 3), "i") # this is unused
space = self.gridSpacing
        # offset the lower-left corner by half a spacing so points sit at voxel centers
i = 0
padding = space / 2.0
for zi in range(nz):
for yi in range(ny):
for xi in range(nx):
x = xl + xi * space + padding
y = yl + yi * space + padding
z = zl + zi * space + padding
pointArrayRaw[i] = (x, y, z)
self.ijkPtIndice[i] = (xi, yi, zi)
i += 1
self.log.info("grid spacing %d", space)
self.masterGridPositions = pointArrayRaw
def create3DPointLookup(self, boundingBox=None):
"""
Fill the orthogonal bounding box described by two global corners
with an array of points spaces pGridSpacing apart. Optimized version using
numpy broadcasting
"""
if boundingBox is None:
boundingBox = self.boundingBox
space = self.gridSpacing
        # we want the diagonal of the voxel, not the diagonal of the plane, so the second 1.1547 was incorrect
environmentBoxEqualFillBox = False
self.log.info("Using create3DPointLookup")
if environmentBoxEqualFillBox: # environment.environmentBoxEqualFillBox:
self._x = x = numpy.arange(
boundingBox[0][0], boundingBox[1][0], space
) # *1.1547) gridspacing is already multiplied by 1.1547
self._y = y = numpy.arange(
boundingBox[0][1], boundingBox[1][1], space
) # *1.1547)
self._z = z = numpy.arange(
boundingBox[0][2], boundingBox[1][2], space
) # *1.1547)
else:
self._x = x = numpy.arange(
boundingBox[0][0] - space, boundingBox[1][0] + space, space
) # *1.1547) gridspacing is already multiplied by 1.1547
self._y = y = numpy.arange(
boundingBox[0][1] - space, boundingBox[1][1] + space, space
) # *1.1547)
self._z = z = numpy.arange(
boundingBox[0][2] - space, boundingBox[1][2] + space, space
) # *1.1547)
        # sizes must be +1 or the right, top, and back edges don't get any
        # points using this numpy.arange method
        nx = len(x)
ny = len(y)
nz = len(z)
# Dec 5 2013, we need to confirm that the getPointsInBox function is also using +1, or potential neighbors will be missed
# This used to be fine, but it may have changed?
self.nbGridPoints = [nx, ny, nz]
self.gridVolume = nx * ny * nz
self.ijkPtIndice = numpy.ndindex(nx, ny, nz)
# this is 60% faster than the for loop
# self.masterGridPositions = numpy.array(list(numpy.broadcast(*numpy.ix_(x, y, z))))
# self.masterGridPositions = numpy.vstack(numpy.meshgrid(x,y,z)).reshape(3,-1).T
self.masterGridPositions = (
numpy.vstack(numpy.meshgrid(x, y, z, copy=False)).reshape(3, -1).T
)
        # this may be faster but the implications are unclear
# from http://stackoverflow.com/questions/18253210/creating-a-numpy-array-of-3d-coordinates-from-three-1d-arrays
def create3DPointLookupCover(self, boundingBox=None):
"""
Fill the orthogonal bounding box described by two global corners
with an array of points spaces pGridSpacing apart. Optimized version using
numpy broadcasting
"""
if boundingBox is None:
boundingBox = self.boundingBox
space = self.gridSpacing
S = numpy.array(boundingBox[1]) - numpy.array(boundingBox[0])
NX, NY, NZ = numpy.around(S / (self.gridSpacing)) # / 1.1547))
if NX == 0:
NX = 1
if NY == 0:
NY = 1
if NZ == 0:
NZ = 1
self.log.info("using create3DPointLookupCover")
        # we want the diagonal of the voxel, not the diagonal of the plane, so the second 1.1547 was incorrect
environmentBoxEqualFillBox = True
# np.linspace(2.0, 3.0, num=5)
if environmentBoxEqualFillBox: # environment.environmentBoxEqualFillBox:
self._x = x = numpy.linspace(
boundingBox[0][0], boundingBox[1][0], int(NX)
) # *1.1547) gridspacing is already multiplied by 1.1547
self._y = y = numpy.linspace(
boundingBox[0][1], boundingBox[1][1], int(NY)
) # *1.1547)
self._z = z = numpy.linspace(
boundingBox[0][2], boundingBox[1][2], int(NZ)
) # *1.1547)
else:
self._x = x = numpy.arange(
boundingBox[0][0], boundingBox[1][0] + space, space
) # *1.1547) gridspacing is already multiplied by 1.1547
self._y = y = numpy.arange(
boundingBox[0][1], boundingBox[1][1] + space, space
) # *1.1547)
self._z = z = numpy.arange(
boundingBox[0][2], boundingBox[1][2] + space, space
) # *1.1547)
xyz = numpy.meshgrid(x, y, z, copy=False)
        # sizes must be +1 or the right, top, and back edges don't get any
        # points using this numpy.arange method
        nx = len(x)
ny = len(y)
nz = len(z)
self.gridSpacing = (x[1] - x[0]) * 1.1547 # ? should I multiply here ?
self.nbGridPoints = [nx, ny, nz]
self.gridVolume = nx * ny * nz
self.ijkPtIndice = numpy.ndindex(nx, ny, nz)
self.masterGridPositions = numpy.vstack(xyz).reshape(3, -1).T
# self.masterGridPositions = numpy.vstack(numpy.meshgrid(x,y,z,copy=False)).reshape(3,-1).T
def getPointCompartmentId(self, point, ray=1):
        # check if the point is inside one of the compartments
# surface point ?
n_comp = len(self.histoVol.compartments)
if n_comp:
for i in range(n_comp):
inside = self.checkPointInside_rapid(
self.histoVol.compartments[i],
point,
self.histoVol.grid.diag,
ray=ray,
)
if inside:
return -(i + 1)
# comp=comp-1
        # the point is not inside; is it on the surface, i.e. distance to surface < 10?
for i in range(n_comp):
distance, nb = self.histoVol.compartments[i].OGsrfPtsBht.query(point)
if distance < 10.0:
return i + 1
return 0
def getClosestGridPoint(self, pt3d):
if self.tree is None:
self.tree = spatial.cKDTree(self.masterGridPositions, leafsize=10)
distance, nb = self.tree.query(pt3d) # len of ingr posed so far
return distance, nb
def getClosestFreeGridPoint(
self, pt3d, compId=None, updateTree=True, ball=0.0, distance=0.0
):
free_indices = self.freePoints[: self.nbFreePoints]
arr = numpy.array(self.masterGridPositions[free_indices])
        indices = numpy.nonzero(numpy.equal(self.gridPtId[free_indices], compId))[0]
distances = self.distToClosestSurf[free_indices]
if not len(indices):
return None
tree_free = spatial.cKDTree(arr[indices], leafsize=10)
arr = arr[indices]
        # arr of free indices in compartments
res = tree_free.query_ball_point(pt3d, ball) # max distance
if not len(res):
return None
all_distances = distances[res]
all_pts = arr[res]
ind = numpy.nonzero(
numpy.logical_and(
numpy.greater_equal(all_distances, distance),
numpy.less(all_distances, distance * 1.5),
)
)[0]
if not len(ind):
return None
# should pick closest ?
targetPoint = all_pts[
ind[randrange(len(ind))]
] # randomly pick free surface point at given distance
return targetPoint
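        # NOTE: everything below this return is unreachable; it appears to be
        # an older variant of this lookup that was left in place.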
free_indices = self.freePoints[: self.nbFreePoints]
arr = numpy.array(self.masterGridPositions[free_indices])
if self.tree_free is None or updateTree:
if compId is not None:
arr = numpy.array(self.masterGridPositions[free_indices])
indices = numpy.nonzero(
numpy.equal(self.gridPtId[free_indices], compId)
)
self.tree_free = spatial.cKDTree(arr[indices], leafsize=10)
arr = arr[indices]
else:
self.tree_free = spatial.cKDTree(
self.masterGridPositions[: self.nbFreePoints], leafsize=10
)
if distance != 0.0:
res = self.tree_free.query_ball_point(pt3d, distance) #
return 0, res, arr
else:
res = self.tree_free.query(pt3d) # len of ingr posed so far
return res, arr
def cartesian(self, arrays, out=None):
"""
#http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [numpy.asarray(x) for x in arrays]
dtype = arrays[0].dtype
n = numpy.prod([x.size for x in arrays])
if out is None:
out = numpy.zeros([n, len(arrays)], dtype=dtype)
m = int(n / arrays[0].size)
out[:, 0] = numpy.repeat(arrays[0], m)
if arrays[1:]:
self.cartesian(arrays[1:], out=out[0:m, 1:])
for j in range(1, arrays[0].size):
out[j * m : (j + 1) * m, 1:] = out[0:m, 1:]
return out
def getPointFrom3D(self, pt3d):
"""
get point number from 3d coordinates
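        Example: with nbGridPoints = (10, 10, 10), bounding box origin (0, 0, 0)
        and gridSpacing 1.0, pt3d = (2.2, 3.7, 5.1) maps to (i, j, k) = (2, 4, 5)
        and flat index 5 * 100 + 4 * 10 + 2 = 542.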
"""
x, y, z = pt3d # Continuous 3D point to be discretized
        # grid spacing = diagonal of the voxel determined by the smallest packing radius
        spacing1 = 1.0 / self.gridSpacing
        # number of grid points per axis: [length, height, depth]
        NX, NY, NZ = self.nbGridPoints
OX, OY, OZ = self.boundingBox[0] # origin of fill grid
# Algebra gives nearest gridPoint ID to pt3D
i = min(NX - 1, max(0, round((x - OX) * spacing1)))
j = min(NY - 1, max(0, round((y - OY) * spacing1)))
k = min(NZ - 1, max(0, round((z - OZ) * spacing1)))
return int(k * NX * NY + j * NX + i)
def getIJK(self, ptInd):
"""
get i,j,k (3d) indices from u (1d)
        only works for grid points, not compartment points
"""
        if ptInd >= self.nbGridPoints[0] * self.nbGridPoints[1] * self.nbGridPoints[2]:
return [0, 0, 0]
return self.ijkPtIndice[ptInd]
def setupBoundaryPeriodicity(self):
        # we create a dictionary for the adjacent cells of the current grid.
self.sizeXYZ = numpy.array(self.boundingBox[1]) - numpy.array(
self.boundingBox[0]
)
self.periodic_table = {}
self.periodic_table["left"] = (
numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) * self.sizeXYZ
)
self.periodic_table["right"] = (
numpy.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]) * self.sizeXYZ
)
def getPositionPeridocity(self, pt3d, jitter, cutoff):
tr = []
if autopack.biasedPeriodicity:
biased = numpy.array(autopack.biasedPeriodicity)
else:
biased = numpy.array(jitter)
if not autopack.testPeriodicity:
return tr
ox, oy, oz = self.boundingBox[0]
ex, ey, ez = self.boundingBox[1]
px, py, pz = pt3d
p_xyz = [0, 0, 0]
# can I use rapid and find the collision ?
# distance plane X
dist_origin_x = px - ox
dist_edge_x = ex - px
dx = 0
if dist_origin_x < dist_edge_x:
dx = dist_origin_x # 1
p_xyz[0] = 1
else:
dx = dist_edge_x # -1
p_xyz[0] = -1
        if not (dx < cutoff and dx != 0):
            p_xyz[0] = 0
# distance plane Y
doy = py - oy
dey = ey - py
dy = 0
if doy < dey:
dy = doy # 1
p_xyz[1] = 1
else:
dy = dey # -1
p_xyz[1] = -1
        if not (dy < cutoff and dy != 0.0):
            p_xyz[1] = 0
# distance plane Z
doz = pz - oz
dez = ez - pz
dz = 0
if doz < dez:
dz = doz # 1
p_xyz[2] = 1
else:
dz = dez # -1
p_xyz[2] = -1
        if not (dz < cutoff and dz != 0):
            p_xyz[2] = 0
p_xyz = numpy.array(p_xyz) * biased
# for 2D we need 3 corner tiles
# for 3D we need 7 corner tiles
corner = numpy.zeros((4, 3))
indices_non_zero = numpy.nonzero(p_xyz)[0]
for i in indices_non_zero:
# i is the axis that is close to the point
tr.append(pt3d + (self.periodic_table["left"][i] * p_xyz[i])) # 0,1,2
corner[0] += self.periodic_table["left"][i] * p_xyz[i] # 1
# the corner are
# X+Y+Z corner[0]
# X+Y+0 corner[1]
# X+0+Z corner[2]
# 0+Y+Z corner[3]
if len(indices_non_zero) == 2:
# two axis cross-> three pos
tr.append(pt3d + corner[0])
if len(indices_non_zero) == 3:
# in a corner need total 7 pos, never happen in 2D
corner[1] = (
self.periodic_table["left"][0] * p_xyz[0]
+ self.periodic_table["left"][1] * p_xyz[1]
)
corner[2] = (
self.periodic_table["left"][0] * p_xyz[0]
+ self.periodic_table["left"][2] * p_xyz[2]
)
corner[3] = (
self.periodic_table["left"][1] * p_xyz[1]
+ self.periodic_table["left"][2] * p_xyz[2]
)
for i in range(4): # 4+1=5
tr.append(pt3d + corner[i])
return tr
def checkPointInside(self, pt3d, dist=None, jitter=[1, 1, 1], bb=None):
"""
        Check if the given 3d point is inside the grid
"""
if bb is None:
bb = self.boundingBox
origin = numpy.array(bb[0])
edge = numpy.array(bb[1])
for i in range(len(edge)):
if edge[i] < self.gridSpacing:
edge[i] = self.gridSpacing
packing_location = numpy.array(pt3d) # *jitter
test1 = packing_location < origin
test2 = packing_location > edge
if True in test1 or True in test2:
# outside
return False
else:
if dist is not None:
# distance to closest wall
                d1 = (packing_location - origin) * jitter
                s1 = min(d1[d1 != 0])
                d2 = (edge - packing_location) * jitter
                s2 = min(d2[d2 != 0])
if s1 <= dist or s2 <= dist:
self.log.info("s1 s2 smaller than dist %d %d %d", s1, s2, dist)
return False
return True
def getCenter(self):
"""
Get the center of the grid
"""
if self.center is None:
self.center = [0.0, 0.0, 0.0]
for i in range(3):
self.center[i] = (self.boundingBox[0][i] + self.boundingBox[1][i]) / 2.0
return self.center
def getRadius(self):
"""
        Get the radius of the grid (length of the bounding box diagonal)
"""
d = numpy.array(self.boundingBox[0]) - numpy.array(self.boundingBox[1])
s = numpy.sum(d * d)
return math.sqrt(s)
def getPointsInSphere(self, pt, radius):
if self.tree is None:
self.tree = spatial.cKDTree(self.masterGridPositions, leafsize=10)
# add surface points
ptIndices = self.tree.query_ball_point(pt, radius) # , n_jobs=-1)
return ptIndices
def getPointsInCubeFillBB(self, bb, pt, radius, addSP=True, info=False):
"""
Return all grid points indices inside the given bounding box.
        NOTE: needs fixing for grids built with numpy.arange
"""
spacing1 = 1.0 / self.gridSpacing
NX, NY, NZ = self.nbGridPoints
        OX, OY, OZ = self.boundingBox[0]  # origin of fill grid -> bottom left corner, not (0, 0, 0)
ox, oy, oz = bb[0]
ex, ey, ez = bb[1]
        # clamp the bounding box to grid indices; the rounding-based bounds here
        # replaced an earlier floor-based computation that was being overwritten
        i0 = int(min(NX - 1, max(0, round((ox - OX) * spacing1))))
        j0 = int(min(NY - 1, max(0, round((oy - OY) * spacing1))))
        k0 = int(min(NZ - 1, max(0, round((oz - OZ) * spacing1))))
        i1 = int(min(NX, max(0, round((ex - OX) * spacing1))))
        j1 = int(min(NY, max(0, round((ey - OY) * spacing1))))
        k1 = int(min(NZ, max(0, round((ez - OZ) * spacing1))))
if NZ == 1:
k0 = 0
k1 = 1
elif NY == 1:
j0 = 0
j1 = 1
elif NX == 1:
i0 = 0
i1 = 1
ptIndices = []
pid = numpy.mgrid[i0:i1, j0:j1, k0:k1]
ijk = numpy.vstack(pid).reshape(3, -1).T
# in case 2D, meaning one of the dimension is 1
if NZ == 1:
ptIndices = [p[2] + p[1] + NX * p[0] for p in ijk]
elif NY == 1:
ptIndices = [p[2] + p[1] + NX * p[0] for p in ijk]
elif NX == 1:
ptIndices = [p[2] + NY * p[1] + p[0] for p in ijk]
        else:
            # general 3D case; the original expression was corrupted here, so this
            # index formula is reconstructed from getPointFrom3D (an assumption)
            ptIndices = [p[2] * NX * NY + p[1] * NX + p[0] for p in ijk]
# add surface points
if addSP and self.nbSurfacePoints != 0:
result = numpy.zeros((self.nbSurfacePoints,), "i")
nb = self.surfPtsBht.closePoints(tuple(pt), radius, result)
# nb = self.surfPtsBht.query(tuple(pt),k=self.nbSurfacePoints)
ptIndices.extend(
list(map(lambda x, length=self.gridVolume: x + length, result[:nb]))
)
return ptIndices
def test_points_in_bb(self, bb, pt):
        # given a bounding box, check whether the point is contained in it
origin = numpy.array(bb[0])
E = numpy.array(bb[1])
P = numpy.array(pt) # *jitter
test1 = P < origin
test2 = P > E
        inside = True
        if True in test1 or True in test2:
            # outside
            inside = False
        return inside
def getPointsInCube(self, bb, pt, radius, addSP=True, info=False):
"""
        Return all grid point indices inside the given bounding box.
"""
spacing1 = 1.0 / self.gridSpacing
NX, NY, NZ = self.nbGridPoints
        OX, OY, OZ = self.boundingBox[0]  # origin of pack grid -> bottom left corner, not (0, 0, 0)
ox, oy, oz = bb[0]
ex, ey, ez = bb[1]
i0 = int(max(0, floor((ox - OX) * spacing1)))
i1 = int(min(NX, int((ex - OX) * spacing1) + 1))
j0 = int(max(0, floor((oy - OY) * spacing1)))
j1 = int(min(NY, int((ey - OY) * spacing1) + 1))
k0 = int(max(0, floor((oz - OZ) * spacing1)))
k1 = int(min(NZ, int((ez - OZ) * spacing1) + 1))
zPlaneLength = NX * NY
ptIndices = []
for z in range(int(k0), int(k1)):
offz = z * zPlaneLength
for y in range(int(j0), int(j1)):
off = y * NX + offz
for x in range(int(i0), int(i1)):
ptIndices.append(x + off)
# add surface points
if addSP and self.nbSurfacePoints != 0:
result = numpy.zeros((self.nbSurfacePoints,), "i")
nb = self.surfPtsBht.closePoints(tuple(pt), radius, result)
dimx, dimy, dimz = self.nbGridPoints
ptIndices.extend(
list(map(lambda x, length=self.gridVolume: x + length, result[:nb]))
)
return ptIndices
def computeGridNumberOfPoint(self, boundingBox, space):
"""
        Return the grid size: total number of points and number of points per axis
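        Example: boundingBox = ([0, 0, 0], [10, 10, 5]) with space = 1 gives
        (nx, ny, nz) = (10, 10, 5) (plus encapsulatingGrid, 0 by default),
        i.e. 500 points in total.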
"""
xl, yl, zl = boundingBox[0]
xr, yr, zr = boundingBox[1]
encapsulatingGrid = self.encapsulatingGrid
# Graham Added on Oct17 to allow for truly 2D grid for test fills... may break everything!
nx = int(ceil((xr - xl) / space)) + encapsulatingGrid
ny = int(ceil((yr - yl) / space)) + encapsulatingGrid
nz = int(ceil((zr - zl) / space)) + encapsulatingGrid
# nx = nx if (nx == 1) else nx-1
# ny = ny if (ny == 1) else ny-1
# nz = nz if (nz == 1) else nz-1
return nx * ny * nz, (nx, ny, nz)
def set_surfPtsBht(self, verts):
self.surfPtsBht = None
if verts is not None and len(verts):
self.surfPtsBht = bhtreelib.BHtree(verts, None, 10)
self.nbSurfacePoints = len(verts)
def set_surfPtscht(self, verts):
self.surfPtsBht = None
if verts is not None and len(verts):
self.surfPtsBht = spatial.cKDTree(verts, leafsize=10)
self.nbSurfacePoints = len(verts)
def computeExteriorVolume(self, compartments=None, space=None, fbox_bb=None):
# compute exterior volume, totalVolume without compartments volume
unitVol = self.gridSpacing ** 3
totalVolume = self.gridVolume * unitVol
if fbox_bb is not None:
V, nbG = self.computeGridNumberOfPoint(fbox_bb, space)
totalVolume = V * unitVol
if compartments is not None:
for o in compartments:
# totalVolume -= o.surfaceVolume
totalVolume -= o.interiorVolume
return totalVolume
def computeVolume(self, space=None, fbox_bb=None):
        # compute the total grid volume
unitVol = self.gridSpacing ** 3
totalVolume = self.gridVolume * unitVol
if fbox_bb is not None:
V, nbG = self.computeGridNumberOfPoint(fbox_bb, space)
totalVolume = V * unitVol
return totalVolume
def create_rapid_model(self):
self.rapid_model = RAPIDlib.RAPID_model()
# need triangle and vertices
self.rapid_model.addTriangles(
numpy.array(self.vertices, "f"), numpy.array(self.faces, "i")
)
def get_rapid_model(self):
if self.rapid_model is None:
self.create_rapid_model()
return self.rapid_model
def one_rapid_ray(self, pt1, pt2, diag):
# return number of triangle /triangle contact
helper = autopack.helper
rm = self.get_rapid_model()
v1 = numpy.array(pt1)
direction = helper.unit_vector(pt2 - pt1) * diag
v2 = v1 + direction
if sum(v1) == 0.0:
v3 = v2 + numpy.array([0.0, 1.0, 0.0])
else:
v3 = v2 + helper.unit_vector(numpy.cross(v1, v2))
f = [0, 1, 2]
ray_model = RAPIDlib.RAPID_model()
ray_model.addTriangles(
numpy.array([v1, v2, v3], "f"),
numpy.array(
[f],
"i",
),
)
RAPIDlib.RAPID_Collide_scaled(
numpy.identity(3),
numpy.array([0.0, 0.0, 0.0], "f"),
1.0,
rm,
            numpy.identity(3),
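            numpy.array([0.0, 0.0, 0.0], "f"),
            1.0,
            ray_model,
            RAPIDlib.cvar.RAPID_ALL_CONTACTS,
        )
        # the trailing arguments and the return value are a plausible completion
        # of the truncated call, based on the RAPID collide API (identity pose
        # for the ray model, count all contacts); they are an assumption, not
        # the verbatim original
        return RAPIDlib.cvar.RAPID_num_contacts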
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of generally useful calculation tools."""
import functools
import warnings
import numpy as np
import numpy.ma as ma
from scipy.spatial import cKDTree
from . import height_to_pressure_std, pressure_to_height_std
from ..package_tools import Exporter
from ..units import check_units, units
exporter = Exporter(globals())
@exporter.export
def resample_nn_1d(a, centers):
"""Return one-dimensional nearest-neighbor indexes based on user-specified centers.
Parameters
----------
a : array-like
1-dimensional array of numeric values from which to
extract indexes of nearest-neighbors
centers : array-like
1-dimensional array of numeric values representing a subset of values to approximate
Returns
-------
An array of indexes representing values closest to given array values
"""
ix = []
for center in centers:
index = (np.abs(a - center)).argmin()
if index not in ix:
ix.append(index)
return ix
@exporter.export
def nearest_intersection_idx(a, b):
"""Determine the index of the point just before two lines with common x values.
Parameters
----------
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
Returns
-------
An array of indexes representing the index of the values
just before the intersection(s) of the two lines.
"""
# Difference in the two y-value sets
difference = a - b
# Determine the point just before the intersection of the lines
# Will return multiple points for multiple intersections
sign_change_idx, = np.nonzero(np.diff(np.sign(difference)))
return sign_change_idx
@exporter.export
@units.wraps(('=A', '=B'), ('=A', '=B', '=B'))
def find_intersections(x, a, b, direction='all'):
"""Calculate the best estimate of intersection.
Calculates the best estimates of the intersection of two y-value
data sets that share a common x-value set.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
a : array-like
1-dimensional array of y-values for line 1
b : array-like
1-dimensional array of y-values for line 2
direction : string, optional
specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
or 'decreasing' (b becoming greater than a). Defaults to 'all'.
Returns
-------
A tuple (x, y) of array-like with the x and y coordinates of the
intersections of the lines.
"""
# Find the index of the points just before the intersection(s)
nearest_idx = nearest_intersection_idx(a, b)
next_idx = nearest_idx + 1
# Determine the sign of the change
sign_change = np.sign(a[next_idx] - b[next_idx])
# x-values around each intersection
_, x0 = _next_non_masked_element(x, nearest_idx)
_, x1 = _next_non_masked_element(x, next_idx)
# y-values around each intersection for the first line
_, a0 = _next_non_masked_element(a, nearest_idx)
_, a1 = _next_non_masked_element(a, next_idx)
# y-values around each intersection for the second line
_, b0 = _next_non_masked_element(b, nearest_idx)
_, b1 = _next_non_masked_element(b, next_idx)
# Calculate the x-intersection. This comes from finding the equations of the two lines,
# one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
# finding their intersection, and reducing with a bunch of algebra.
delta_y0 = a0 - b0
delta_y1 = a1 - b1
intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
# Calculate the y-intersection of the lines. Just plug the x above into the equation
# for the line through the a points. One could solve for y like x above, but this
# causes weirder unit behavior and seems a little less good numerically.
intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
    # If there are no intersections, return
if len(intersect_x) == 0:
return intersect_x, intersect_y
# Check for duplicates
duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
# Make a mask based on the direction of sign change desired
if direction == 'increasing':
mask = sign_change > 0
elif direction == 'decreasing':
mask = sign_change < 0
elif direction == 'all':
return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
else:
raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
@exporter.export
def interpolate_nans(x, y, kind='linear'):
"""Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
"""
x_sort_args = np.argsort(x)
x = x[x_sort_args]
y = y[x_sort_args]
nans = np.isnan(y)
if kind == 'linear':
y[nans] = np.interp(x[nans], x[~nans], y[~nans])
elif kind == 'log':
y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
else:
raise ValueError('Unknown option for kind: {0}'.format(str(kind)))
return y[x_sort_args]
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
def delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
@exporter.export
def reduce_point_density(points, radius, priority=None):
r"""Return a mask to reduce the density of points in irregularly-spaced data.
This function is used to down-sample a collection of scattered points (e.g. surface
data), returning a mask that can be used to select the points from one or more arrays
(e.g. arrays of temperature and dew point). The points selected can be controlled by
providing an array of ``priority`` values (e.g. rainfall totals to ensure that
stations with higher precipitation remain in the mask).
Parameters
----------
points : (N, K) array-like
N locations of the points in K dimensional space
radius : float
minimum radius allowed between points
priority : (N, K) array-like, optional
If given, this should have the same shape as ``points``; these values will
be used to control selection priority for points.
Returns
-------
(N,) array-like of boolean values indicating whether points should be kept. This
can be used directly to index numpy arrays to return only the desired points.
Examples
--------
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
array([ True, False, True], dtype=bool)
>>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
... priority=np.array([0.1, 0.9, 0.3]))
array([False, True, False], dtype=bool)
"""
# Handle 1D input
if points.ndim < 2:
points = points.reshape(-1, 1)
# Make a kd-tree to speed searching of data.
tree = cKDTree(points)
# Need to use sorted indices rather than sorting the position
# so that the keep mask matches *original* order.
if priority is not None:
# Need to sort the locations in decreasing priority.
sorted_indices = np.argsort(priority)[::-1]
else:
# Take advantage of iterator nature of range here to avoid making big lists
sorted_indices = range(len(points))
# Keep all points initially
    keep = np.ones(len(points), dtype=bool)
# Loop over all the potential points
for ind in sorted_indices:
# Only proceed if we haven't already excluded this point
if keep[ind]:
# Find the neighbors and eliminate them
neighbors = tree.query_ball_point(points[ind], radius)
keep[neighbors] = False
# We just removed ourselves, so undo that
keep[ind] = True
return keep
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere is
assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
"""
# Bound is given in pressure
if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
# If the bound is in the pressure data, we know the pressure bound exactly
if bound in pressure:
bound_pressure = bound
# If we have heights, we know the exact height value, otherwise return standard
# atmosphere height for the pressure
if heights is not None:
bound_height = heights[pressure == bound_pressure]
else:
bound_height = pressure_to_height_std(bound_pressure)
# If bound is not in the data, return the nearest or interpolated values
else:
if interpolate:
bound_pressure = bound # Use the user specified bound
if heights is not None: # Interpolate heights from the height data
bound_height = log_interp(bound_pressure, pressure, heights)
else: # If not heights given, use the standard atmosphere
bound_height = pressure_to_height_std(bound_pressure)
else: # No interpolation, find the closest values
idx = (np.abs(pressure - bound)).argmin()
bound_pressure = pressure[idx]
if heights is not None:
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
# Bound is given in height
elif bound.dimensionality == {'[length]': 1.0}:
# If there is height data, see if we have the bound or need to interpolate/find nearest
if heights is not None:
if bound in heights: # Bound is in the height data
bound_height = bound
bound_pressure = pressure[heights == bound]
else: # Bound is not in the data
if interpolate:
bound_height = bound
                    # Need to cast back to the input type since interp (up to at
                    # least numpy 1.13) always returns float64. This can cause
                    # problems for upstream users, e.g. np.append() upcasting.
bound_pressure = np.interp(np.atleast_1d(bound), heights,
pressure).astype(bound.dtype) * pressure.units
else:
idx = (np.abs(heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else: # Don't have heights, so assume a standard atmosphere
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
# If interpolation is on, this is all we need, if not, we need to go back and
# find the pressure closest to this and refigure the bounds
if not interpolate:
idx = (np.abs(pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
# Bound has invalid units
else:
raise ValueError('Bound must be specified in units of length or pressure.')
# If the bound is out of the range of the data, we shouldn't extrapolate
if (bound_pressure < np.min(pressure)) or (bound_pressure > np.max(pressure)):
raise ValueError('Specified bound is outside pressure range.')
if heights is not None:
if (bound_height > np.max(heights)) or (bound_height < np.min(heights)):
raise ValueError('Specified bound is outside height range.')
return bound_pressure, bound_height
@exporter.export
@check_units('[length]')
def get_layer_heights(heights, depth, *args, **kwargs):
"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer using
the heights only.
Parameters
----------
heights : array-like
Atmospheric heights
depth : `pint.Quantity`
The thickness of the layer
*args : array-like
Atmospheric variable(s) measured at the given pressures
bottom : `pint.Quantity`, optional
The bottom of the layer
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
with_agl : bool, optional
Returns the heights as above ground level by subtracting the minimum height in the
provided heights. Defaults to False.
Returns
-------
`pint.Quantity, pint.Quantity`
The height and data variables of the layer
"""
bottom = kwargs.pop('bottom', None)
interpolate = kwargs.pop('interpolate', True)
with_agl = kwargs.pop('with_agl', False)
# Make sure pressure and datavars are the same length
for datavar in args:
if len(heights) != len(datavar):
raise ValueError('Height and data variables must have the same length.')
# If we want things in AGL, subtract the minimum height from all height values
if with_agl:
sfc_height = np.min(heights)
heights -= sfc_height
# If the bottom is not specified, make it the surface
if bottom is None:
bottom = heights[0]
# Make heights and arguments base units
heights = heights.to_base_units()
bottom = bottom.to_base_units()
# Calculate the top of the layer
top = bottom + depth
ret = [] # returned data variables in layer
# Ensure heights are sorted in ascending order
sort_inds = np.argsort(heights)
heights = heights[sort_inds]
# Mask based on top and bottom
inds = (heights >= bottom) & (heights <= top)
heights_interp = heights[inds]
# Interpolate heights at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, top)) * heights.units
if bottom not in heights_interp:
heights_interp = np.sort(np.append(heights_interp, bottom)) * heights.units
ret.append(heights_interp)
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = interp(heights_interp, heights, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar)
return ret
@exporter.export
@check_units('[pressure]')
def get_layer(pressure, *args, **kwargs):
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
*args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the lowest pressure or height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
"""
# Pop off keyword arguments
heights = kwargs.pop('heights', None)
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', 100 * units.hPa)
interpolate = kwargs.pop('interpolate', True)
# If we get the depth kwarg, but it's None, set it to the default as well
if depth is None:
depth = 100 * units.hPa
# Make sure pressure and datavars are the same length
for datavar in args:
if len(pressure) != len(datavar):
raise ValueError('Pressure and data variables must have the same length.')
# If the bottom is not specified, make it the surface pressure
if bottom is None:
bottom = pressure[0]
bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
heights=heights,
interpolate=interpolate)
    # Calculate the top in whatever units depth is given
if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
top = bottom_pressure - depth
elif depth.dimensionality == {'[length]': 1}:
top = bottom_height + depth
else:
raise ValueError('Depth must be specified in units of length or pressure')
top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
interpolate=interpolate)
ret = [] # returned data variables in layer
# Ensure pressures are sorted in ascending order
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
# Mask based on top and bottom pressure
inds = (pressure <= bottom_pressure) & (pressure >= top_pressure)
p_interp = pressure[inds]
# Interpolate pressures at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if top_pressure not in p_interp:
p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units
if bottom_pressure not in p_interp:
p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units
ret.append(p_interp[::-1])
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = log_interp(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::-1])
return ret
@exporter.export
@units.wraps(None, ('=A', '=A'))
def interp(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([ 2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop('fill_value', np.nan)
axis = kwargs.pop('axis', 0)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
    sort_args = np.argsort(xp, axis=axis)
from __future__ import absolute_import
from __future__ import print_function
import pytest
import os
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import np_utils
from keras.utils.test_utils import get_test_data
from keras.models import model_from_json, model_from_yaml
from keras import losses
from keras.engine.training_utils import make_batches
input_dim = 16
num_hidden = 8
num_classes = 4
batch_size = 32
epochs = 1
@pytest.fixture
def in_tmpdir(tmpdir):
"""Runs a function in a temporary directory.
Checks that the directory is empty afterwards.
"""
with tmpdir.as_cwd():
yield None
assert not tmpdir.listdir()
def test_sequential_pop():
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim))
model.add(Dense(num_classes))
model.compile(loss='mse', optimizer='sgd')
x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
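    # The test body is truncated in the source; a plausible completion, based
    # on what Sequential.pop() is expected to do (an assumption, not the
    # verbatim original test):
    model.fit(x, y, epochs=epochs, batch_size=batch_size, verbose=0)
    model.pop()
    assert len(model.layers) == 1
    assert model.output_shape == (None, num_hidden)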
import numpy as np
from gmmmc.gmm import GMM
from gmmmc.proposals.proposals import Proposal
import pdb
import logging
class GaussianStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
            Iterable containing the sequence of step sizes (covariances of the
            Gaussian proposal distribution)
"""
super(GaussianStepMeansProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new mean parameters.
"""
new_means = np.array(gmm.means)
beta = target.beta
prior = target.prior
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures)
for step_size in self.step_sizes]
# calculation of prior probabilities of only the means, since only means will change
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
log_prob_priors = np.sum(log_priors)
previous_prob = beta * gmm.log_likelihood(X, n_jobs) + np.sum(log_priors)
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new means
new_mixture_means = gmm.means[mixture] + step[mixture]
# try out the new means
proposed_means = np.array(new_means)
proposed_means[mixture] = new_mixture_means
proposed_gmm = GMM(proposed_means, np.array(gmm.covars), np.array(gmm.weights))
# calculate new prior
new_log_prob_mixture = prior.means_prior.log_prob_single(new_mixture_means, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
# priors
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_means = proposed_means
previous_prob = proposed_prob
# update prior probability calculation
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
return GMM(new_means, np.array(gmm.covars), np.array(gmm.weights))
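# Note on the acceptance rule used throughout this module: a move is accepted
# when log p(proposed) - log p(current) exceeds log(u) with u ~ Uniform(0, 1),
# i.e. the standard Metropolis criterion for a symmetric proposal; the
# `ratio > 0` clause only short-circuits the uniform draw when acceptance is
# already certain.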
class GaussianStepCovarProposal(Proposal):
def __init__(self, step_sizes=(0.001,)):
"""
Gaussian proposal function for the covariances of the GMM.
Parameters
----------
step_sizes : array_like
Array of covariance values for the Gaussian proposal.
"""
super(GaussianStepCovarProposal, self).__init__()
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM covariances (diagonal only).
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
new_covars = np.array(gmm.covars)
beta = target.beta
prior = target.prior
        steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
                                               step_size * np.eye(gmm.n_features),
                                               size=gmm.n_mixtures) for step_size in self.step_sizes]
        # calculation of prior probabilities of only the covariances, since only covariances will change
        log_priors = np.array([prior.covars_prior.log_prob_single(gmm.covars[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
        log_prob_priors = np.sum(log_priors)
        previous_prob = beta * gmm.log_likelihood(X, n_jobs) + log_prob_priors
for i, step in enumerate(steps):
for mixture in xrange(gmm.n_mixtures):
self.count_proposed[i] += 1
# propose new covars
new_mixture_covars = gmm.covars[mixture] + step[mixture]
if (new_mixture_covars > 0).all(): # check covariances are valid
# try out the new covars
proposed_covars = np.array(new_covars)
proposed_covars[mixture] = new_mixture_covars
proposed_gmm = GMM(np.array(gmm.means), proposed_covars, np.array(gmm.weights))
# calculate desired distribution
new_log_prob_mixture = prior.covars_prior.log_prob_single(new_mixture_covars, mixture)
new_log_prob_priors = log_prob_priors - log_priors[mixture] + new_log_prob_mixture
proposed_prob = beta * proposed_gmm.log_likelihood(X, n_jobs) + new_log_prob_priors
# ratio
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
new_covars = proposed_covars
previous_prob = proposed_prob
log_prob_priors = new_log_prob_priors
log_priors[mixture] = new_log_prob_mixture
self.count_accepted[i] += 1
else:
self.count_illegal[i] += 1
return GMM(np.array(gmm.means), np.array(new_covars), np.array(gmm.weights))
class GaussianStepWeightsProposal(Proposal):
def __init__(self, n_mixtures, step_sizes=(0.001,), threshold=0.001):
"""
Gaussian proposal function for the weights of a GMM.
        Parameters
        ----------
        n_mixtures : int
            number of mixture components in the GMM
        step_sizes : 1-D array_like
            sequence of step sizes (covariances of the Gaussian proposal distribution)
        threshold : float
            minimum allowed weight for any component; proposals below this are rejected
        Notes
        ----------
        The proposal function works by projecting the weight vector w onto the simplex defined by
        w_1 + w_2 + ... + w_n = 1, 0 <= w_i <= 1. The change of basis matrix is found by taking n-1 vectors lying on the plane
        and using Gram-Schmidt to get an orthonormal basis. A Gaussian proposal function in (n-1)-d space is
        used to find the next point on the simplex.
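        Example
        -------
        Illustrative round trip (not part of the original docstring): a weight
        vector on the simplex is recovered exactly after projecting down and
        lifting back.
        >>> prop = GaussianStepWeightsProposal(3)
        >>> w = np.array([0.2, 0.3, 0.5])
        >>> np.allclose(prop.invTransformSimplex(prop.transformSimplex(w)), w)
        True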
"""
super(GaussianStepWeightsProposal, self).__init__()
self.step_sizes = step_sizes
self.n_mixtures = n_mixtures
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
self.threshold = threshold
if n_mixtures > 1:
            # get change of basis matrix mapping n dim coordinates to n-1 dim coordinates on the simplex
            # x1 + x2 + x3 + ... = 1
points = np.random.dirichlet([1 for i in xrange(n_mixtures)], size=n_mixtures - 1)
points = points.T
self.plane_origin = np.ones((n_mixtures)) / float(n_mixtures)
# get vectors parallel to plane from its center (1/n,1/n,....)
parallel = points - np.ones(points.shape) / float(n_mixtures)
            # use Gram-Schmidt to get mutually orthonormal vectors (basis)
self.e, _ = np.linalg.qr(parallel)
def transformSimplex(self, weights):
"""
Project weight vector onto the normal simplex.
Parameters
----------
weights : array_like of shape (n_mixtures,)
vector of weights for each gaussian component
Returns
-------
: array_like of shape (n_mixtures-1,)
vector of weights projected onto the simplex plane
"""
# project onto the simplex
return np.dot(self.e.T, weights - self.plane_origin)
def invTransformSimplex(self, simplex_coords):
"""
Transforms a point on the simplex to the original vector space.
Parameters
----------
simplex_coords : array_like of shape (n_mixtures - 1,)
Coordinates of a weight vector on the simplex.
Returns
-------
: array_like of shape(n_mixtures,)
vector of weights.
"""
return self.plane_origin + np.dot(self.e, simplex_coords)
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of weight vectors.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new covariance parameters.
"""
accepted = False
cur_gmm = gmm
if gmm.n_mixtures > 1:
for i, step_size in enumerate(self.step_sizes):
self.count_proposed[i] += 1
current_weights_transformed = self.transformSimplex(cur_gmm.weights)
proposed_weights_transformed = np.random.multivariate_normal(current_weights_transformed,
np.eye(self.n_mixtures - 1) * step_size)
proposed_weights = self.invTransformSimplex(proposed_weights_transformed)
if np.logical_and(0 <= proposed_weights, proposed_weights <= 1).all()\
and np.isclose(np.sum(proposed_weights), 1.0) and (proposed_weights>self.threshold).all():
previous_prob = target.log_prob(X, cur_gmm, n_jobs)
proposed_gmm = GMM(np.array(cur_gmm.means), np.array(cur_gmm.covars), proposed_weights)
proposed_prob = target.log_prob(X, proposed_gmm, n_jobs)
ratio = proposed_prob - previous_prob
if ratio > 0 or ratio > np.log(np.random.uniform()):
# accept proposal
self.count_accepted[i] += 1
accepted = True
cur_gmm = proposed_gmm
else:
self.count_illegal[i] += 1
if accepted is True:
return cur_gmm
else:
return GMM(np.array(gmm.means), np.array(gmm.covars), np.array(gmm.weights))
class GaussianTuningStepMeansProposal(Proposal):
"""Gaussian Proposal distribution for means of a GMM"""
def __init__(self, step_sizes=(0.001,), limit=200):
"""
Gaussian proposal distribution for the means. The multivariate Gaussian is centered at the means of the current
state in the Markov Chain and has covariance given by step_sizes. Multiple step sizes can be specified.
The proposal algorithm will take these steps in the sequence specified in step_sizes.
Parameters
----------
step_sizes : 1-D array_like
            Iterable containing the sequence of step sizes (covariances of the Gaussian proposal distribution).
"""
super(GaussianTuningStepMeansProposal, self).__init__()
self.limit = limit
self.count_steps = 0
self.count_acceptance_bucket = np.zeros((len(step_sizes),))
self.record = []
self.step_sizes = step_sizes
self.count_accepted = np.zeros((len(step_sizes),))
self.count_illegal = np.zeros((len(step_sizes),))
self.count_proposed = np.zeros((len(step_sizes),))
def propose(self, X, gmm, target, n_jobs=1):
"""
Propose a new set of GMM means.
Parameters
----------
X : 2-D array like of shape (n_samples, n_features)
The observed data or evidence.
gmm : GMM object
The current state (set of gmm parameters) in the Markov Chain
target : GMMPosteriorTarget object
The target distribution to be found.
n_jobs : int
Number of cpu cores to use in the calculation of log probabilities.
Returns
-------
: GMM
A new GMM object initialised with new mean parameters.
"""
new_means = np.array(gmm.means)
beta = target.beta
prior = target.prior
steps = [np.random.multivariate_normal(np.zeros(gmm.n_features),
step_size * np.eye(gmm.n_features),
size=gmm.n_mixtures)
for step_size in self.step_sizes]
# calculation of prior probabilities of only the means, since only means will change
log_priors = np.array([prior.means_prior.log_prob_single(gmm.means[mixture], mixture) for mixture in xrange(gmm.n_mixtures)])
        log_prob_priors = np.sum(log_priors)
from private.symbol import TSymbol, NTSymbol, BondSymbol
from private.utils import _node_match, _node_match_prod_rule, _edge_match, masked_softmax
from private.hypergraph import Hypergraph, common_node_list
from collections import Counter
from copy import deepcopy
from functools import partial
from abc import ABCMeta, abstractmethod
from networkx.algorithms.isomorphism import GraphMatcher
from typing import List, Dict, Tuple
import networkx as nx
import numpy as np
import torch
import os
import random
import warnings
DEBUG = False
class GraphGrammarBase(metaclass=ABCMeta):
@abstractmethod
def learn(self):
pass
@abstractmethod
def sample(self):
pass
class ProductionRule(object):
""" A class of a production rule
Attributes
----------
lhs : Hypergraph or None
the left hand side of the production rule.
if None, the rule is a starting rule.
rhs : Hypergraph
the right hand side of the production rule.
"""
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
@property
def is_start_rule(self) -> bool:
return self.lhs.num_nodes == 0
@property
def is_gluing(self) -> bool:
if self.lhs:
return len(self.lhs.get_all_NT_edges()) > 1
return False
@property
def is_ending(self) -> bool:
return len(self.rhs.get_all_NT_edges()) == 0
@property
def ext_node(self) -> Dict[int, str]:
""" return a dict of external nodes
"""
if self.is_start_rule:
return {}
else:
ext_node_dict = {}
for each_node in self.lhs.nodes:
ext_node_dict[self.lhs.node_attr(each_node)["ext_id"]] = each_node
return ext_node_dict
@property
def lhs_nt_symbol(self) -> NTSymbol:
if self.is_start_rule:
return NTSymbol(degree=0, is_aromatic=False, bond_symbol_list=[])
else:
# return self.lhs.edge_attr(list(self.lhs.edges)[0])['symbol']
return [self.lhs.edge_attr(edge)['symbol'] for edge in list(self.lhs.edges)]
def rhs_adj_mat(self, node_edge_list):
''' return the adjacency matrix of rhs of the production rule
'''
return nx.adjacency_matrix(self.rhs.hg, node_edge_list)
def draw(self, file_path=None):
return self.rhs.draw(file_path)
def is_same(self, prod_rule, ignore_order=False):
""" judge whether this production rule is
the same as the input one, `prod_rule`
Parameters
----------
prod_rule : ProductionRule
production rule to be compared
Returns
-------
is_same : bool
isomap : dict
isomorphism of nodes and hyperedges.
ex) {'bond_42': 'bond_37', 'bond_2': 'bond_1',
'e36': 'e11', 'e16': 'e12', 'e25': 'e18',
'bond_40': 'bond_38', 'e26': 'e21', 'bond_41': 'bond_39'}.
key comes from `prod_rule`, value comes from `self`.
"""
if self.is_start_rule:
if not prod_rule.is_start_rule:
return False, {}
else:
if prod_rule.is_start_rule:
return False, {}
else:
if prod_rule.lhs.num_nodes != self.lhs.num_nodes:
return False, {}
if prod_rule.rhs.num_nodes != self.rhs.num_nodes:
return False, {}
if prod_rule.rhs.num_edges != self.rhs.num_edges:
return False, {}
subhg_bond_symbol_counter \
= Counter([prod_rule.rhs.node_attr(each_node)['symbol'] \
for each_node in prod_rule.rhs.nodes])
each_bond_symbol_counter \
= Counter([self.rhs.node_attr(each_node)['symbol'] \
for each_node in self.rhs.nodes])
if subhg_bond_symbol_counter != each_bond_symbol_counter:
return False, {}
subhg_atom_symbol_counter \
= Counter([prod_rule.rhs.edge_attr(each_edge)['symbol'] \
for each_edge in prod_rule.rhs.edges])
each_atom_symbol_counter \
= Counter([self.rhs.edge_attr(each_edge)['symbol'] \
for each_edge in self.rhs.edges])
if subhg_atom_symbol_counter != each_atom_symbol_counter:
return False, {}
gm = GraphMatcher(prod_rule.rhs.hg,
self.rhs.hg,
partial(_node_match_prod_rule,
ignore_order=ignore_order),
partial(_edge_match,
ignore_order=ignore_order))
try:
return True, next(gm.isomorphisms_iter())
except StopIteration:
return False, {}
def graph_rule_applied_to(self, input_hg, selected_edge=None, selected_iso_mapping=None, vis=False):
""" augment `hg` by replacing `edge` with `self.rhs`.
Parameters
----------
hg : Hypergraph
edge : str
`edge` must belong to `hg`
Returns
-------
hg : Hypergraph
resultant hypergraph
nt_edge_list : list
list of non-terminal edges
"""
hg = deepcopy(input_hg)
nt_edge_dict = {}
if self.is_start_rule:
node_idx = hg.num_nodes
# hg = Hypergraph()
node_map_rhs = {} # node id in rhs -> node id in hg, where rhs is augmented.
for num_idx, each_node in enumerate(self.rhs.nodes):
hg.add_node(f"bond_{num_idx+node_idx}",
attr_dict=self.rhs.node_attr(each_node))
node_map_rhs[each_node] = f"bond_{num_idx+node_idx}"
for each_edge in self.rhs.edges:
node_list = []
for each_node in self.rhs.nodes_in_edge(each_edge):
node_list.append(node_map_rhs[each_node])
if isinstance(self.rhs.nodes_in_edge(each_edge), set):
node_list = set(node_list)
edge_id = hg.add_edge(
node_list,
attr_dict=self.rhs.edge_attr(each_edge))
if "nt_idx" in hg.edge_attr(edge_id):
nt_edge_dict[hg.edge_attr(edge_id)["nt_idx"]] = edge_id
nt_edge_list = [nt_edge_dict[key] for key in range(len(nt_edge_dict))]
return hg, nt_edge_list, True
else:
hg_NT_edges = hg.get_all_NT_edges()
lhs_NT_edges = self.lhs.get_all_NT_edges()
match_flag = []
lhs_hg_matched_edge_dict = {}
assert len(lhs_NT_edges) == 1
# print("NTs:")
# print([edge.edges for edge in hg_NT_edges])
# print([edge.edge_attr(list(edge.edges)[0])['symbol'].symbol for edge in hg_NT_edges])
# eq = lhs_NT_edges[0] == hg_NT_edges[0]
for lhs_edge in lhs_NT_edges:
if lhs_edge in hg_NT_edges:
match_flag.append(True)
assert len(lhs_edge.edges) == 1
for hg_NT_edge in hg_NT_edges:
if hg_NT_edge == lhs_edge:
if list(lhs_edge.edges)[0] not in lhs_hg_matched_edge_dict.keys():
lhs_hg_matched_edge_dict[list(lhs_edge.edges)[0]] = [hg_NT_edge]
else:
lhs_hg_matched_edge_dict[list(lhs_edge.edges)[0]].append(hg_NT_edge)
else:
match_flag.append(False)
if not all(match_flag):
return hg, [], False
# for edge in hg.edges:
# print("edge: {} -> nodes: {}".format(edge, hg.nodes_in_edge(edge)))
# order of nodes that belong to the non-terminal edge in hg
nt_order_dict = {} # hg_node -> order ("bond_17" : 1)
nt_order_dict_inv = {} # order -> hg_node
to_rm_edges = []
assert len(lhs_NT_edges) == 1
for _i, edge_lhs in enumerate(lhs_NT_edges):
edges_hg = lhs_hg_matched_edge_dict[list(edge_lhs.edges)[0]]
edges_name_to_hg = {list(edge_hg.edges)[0]:edge_hg for edge_hg in edges_hg}
edges_cand = edges_name_to_hg.keys()
# filter out those removed edges in previous iterations
edges = [_edge for _edge in edges_cand if _edge not in to_rm_edges]
# if it is empty, meaning that there is only actually one NT in hg that matches multiple NTs in the lhs, the rule should be abandoned
if len(edges) == 0:
return hg, [], False
# TODO add options
edge = selected_edge
                if edge is None:
edge = np.random.choice(edges, 1)[0]
# print('selected edge:', edge)
# edge_hg = edges_hg[edges_cand.index(edge)]
edge_hg = edges_name_to_hg[edge]
iso_mappings = edge_hg.find_isomorphism_mapping(edge_lhs, vis) # From edge_hg to edge_lhs
# print("mapping:", iso_mapping)
if selected_iso_mapping is None:
                    iso_mapping = np.random.choice(iso_mappings, 1)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from collections import Iterable
from enum import Enum
import os
import pkg_resources
import sys
import pandas as pd
import numpy as np
import scipy as sp
from clonesig import mixin_init_parameters
try:
rows, columns = os.popen('stty size', 'r').read().split()
pd.set_option('display.width', int(columns))
pd.set_option('display.max_columns', 200)
except:
print("running on server, otherwise please investigate")
# this is the threshold to consider eigenvalues null to get an approximation
# of the degree of freedom of the cosine distance matrix for signatures
EV_DOF_THRESHOLD = 0.5
def _get_projected(L, R, x):
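    """Project x onto the box [L, R] componentwise; note that x is modified in place."""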
x[x < L] = L[x < L]
x[x > R] = R[x > R]
return x
def log_binomial_coeff(n, k):
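    """Log of the binomial coefficient C(n, k), via log-gamma functions for numerical stability."""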
return (sp.special.gammaln(n+1) - (sp.special.gammaln(k+1) +
sp.special.gammaln(n-k+1)))
def _beta_binomial_logpmf(x, n, phi, tau):
alpha = phi / tau
beta = 1/tau - alpha
return log_binomial_coeff(n, x)\
+ sp.special.betaln(x + alpha, n - x + beta)\
- sp.special.betaln(alpha, beta)
def beta_binomial_pmf(x, n, phi, tau):
return np.exp(_beta_binomial_logpmf(x, n, phi, tau))
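# Illustrative sanity check (not part of the original source): the
# parameterization is alpha = phi / tau and beta = (1 - phi) / tau, so
# phi=0.3, tau=0.1 corresponds to a Beta(3, 7) mixing distribution, and
# beta_binomial_pmf(np.arange(11), 10, 0.3, 0.1).sum() equals 1.0 up to
# floating point error.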
class Estimator(mixin_init_parameters.MixinInitParameters):
def __init__(self, T, B, C_normal, C_tumor_tot, C_tumor_minor, D, p, J,
maxiter=10000, pi=None, phi=None, xi=None, rho=None, tau=None,
verbose=False, inputMU=None, nu=None, save_trace=False):
"""
est = Estimator(...)
est.init_params(...)
est.fit(...)
"""
self.T = T
self.B = B
self.C_normal = C_normal
self.C_tumor_tot = C_tumor_tot
self.C_tumor_minor = C_tumor_minor
self.C_tumor_major = (self.C_tumor_tot - self.C_tumor_minor).astype(int)
self.C_est_tumor_mut = C_tumor_minor.copy()
# this is a choice
self.C_est_tumor_mut[self.C_est_tumor_mut == 0] = \
self.C_tumor_tot[self.C_est_tumor_mut == 0]
# alternative could be
# self.C_est_tumor_mut[self.C_est_tumor_mut==0] = 1
self.Mmax = max(self.C_tumor_major)
self.D = D
self.p = p
self.J = J
self.N = len(B)
if inputMU is None:
self.mu_matrix = self.default_mu()
else:
self.mu_matrix = inputMU
self.mu_matrix = self._remove_zeros_distrib(self.mu_matrix)
self.mu = np.moveaxis(np.tile(self.mu_matrix[:, self.T.astype(int)],
(self.J, self.Mmax, 1, 1)),
[0, 1, 2, 3], [1, 3, 2, 0])
self.L = self.mu.shape[2]
self.Fs = list()
self.maxiter = maxiter
self.init_params(pi, phi, xi, rho, tau, spasePi=False)
self.init_nu_param(nu)
self.verbose = verbose
ev, _ = np.linalg.eig(1-sp.spatial.distance.squareform(sp.spatial.distance.pdist(self.mu_matrix, 'cosine')))
self.dof = sum(ev > EV_DOF_THRESHOLD)
self.save_trace = save_trace
def init_nu_param(self, nu=None):
if nu is None:
self.nu = np.ones((self.N, self.Mmax)) * \
mixin_init_parameters.ZERO_PADDING
for i in range(self.N):
self.nu[i, :self.C_tumor_major[i]] = 1 / self.C_tumor_major[i]
# self.nu[i, min(int(np.round(max(1, self.B[i]/self.D[i] * (self.p*self.C_tumor_tot[i] + (1-self.p) * self.C_normal[i]) / self.p))), self.C_tumor_major[i]) - 1] = 1
else:
self.nu = nu
# make sure there are no null values
self.nu[self.nu == 0] = mixin_init_parameters.ZERO_PADDING
self.nu = self.nu / self.nu.sum(axis=1)[:, np.newaxis]
# get log(nu) in the right dimension (N * J * L * Mmax) for computation
self.lognu_sig = np.moveaxis(np.tile(np.log(self.nu), [self.J, self.L, 1, 1]),
[0, 1, 2], [1, 2, 0])
self.Mask = (self.nu > mixin_init_parameters.ZERO_PADDING * 10).astype(bool)
self.Mask_sig = np.moveaxis(np.tile(self.Mask, [self.J, self.L, 1, 1]),
[0, 1, 2], [1, 2, 0])
pre_eta = np.repeat(np.arange(1, self.Mmax+1).reshape([-1, 1]), self.N,
axis=1).T
self.eta = self.p * pre_eta / ((1 - self.p) * np.repeat(self.C_normal.reshape([-1, 1]), self.Mmax, axis=1) +
self.p * np.repeat(self.C_tumor_tot.reshape([-1, 1]), self.Mmax, axis=1))
self.eta[~self.Mask] = 1
self.qun, self.vmnu, self.rnus = self.get_responsabilities
@property
def get_theta(self):
return np.concatenate((self.xi.flatten(), self.pi.flatten(),
self.phi.flatten(), [self.tau]))
def get_log_xi_sig(self, xi):
"""
get log(xi) in the right dimension (N * J * L * Mmax) for computation
"""
return np.moveaxis(np.tile(np.log(xi).reshape(-1, 1).T,
[self.N, self.L, self.Mmax, 1]),
[1, 2, 3], [2, 3, 1])
def get_log_bb_sig(self, phi, tau):
"""
computes the logbinomial probability of Bn|Dn for all point, in all
clones in the right dimension (N * J * L * Mmax) for computation
"""
phi_un_bar = np.rollaxis(np.repeat([self.eta], self.J, axis=0), 0, 2) \
* np.tile(phi.reshape(-1, 1), [self.N, 1, self.Mmax])
log_bb = _beta_binomial_logpmf(
np.rollaxis(np.tile(self.B.reshape(-1, 1), [self.J, 1, self.Mmax]),
1, 0),
np.rollaxis(np.tile(self.D.reshape(-1, 1), [self.J, 1, self.Mmax]),
1, 0),
phi_un_bar, tau)
log_bb_sig = np.rollaxis(np.repeat([log_bb], self.L, axis=0), 0, 3)
return log_bb_sig
def get_log_pi_sig(self, pi):
"""
get log(pi) in the right dimension (N * J * L * Mmax) for computation
"""
return np.moveaxis(np.tile(np.log(pi), (self.N, self.Mmax, 1, 1)),
[1, 2, 3], [3, 1, 2])
def compute_F(self, qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu):
q = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
big_qun_sig = np.moveaxis(np.repeat([big_qun], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
big_rnus_m = np.moveaxis(np.repeat([rnus], self.Mmax, axis=0),
[0, 1, 2, 3], [3, 0, 1, 2])
big_vmn_sig = np.moveaxis(np.repeat([vmnu], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
joint_dist = self._remove_zeros_joint(
big_qun_sig * big_rnus_m * big_vmn_sig)
# old version - (joint_dist * np.log(joint_dist)).sum()
h = - (joint_dist * np.log(joint_dist)).sum()
return q - h
def compute_Q(self, qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu):
log_xi_sig = self.get_log_xi_sig(new_xi)
log_mu = np.log(self.mu)
log_pi = self.get_log_pi_sig(new_pi)
combiln = log_binomial_coeff(self.D, self.B)
bin_coeff = np.moveaxis(np.tile(combiln, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
phi_un_bar = np.repeat(self.eta.reshape(self.N, -1, self.Mmax), self.J, axis=1) *\
np.swapaxes(np.tile(phi, (self.N, self.Mmax, 1)), 1, 2)
big_b = np.moveaxis(np.tile(self.B, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
big_d = np.moveaxis(np.tile(self.D, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
term1 = sp.special.loggamma(big_b + phi_un_bar / tau)
term2 = sp.special.loggamma((1 - phi_un_bar) / tau + big_d - big_b)
term3 = sp.special.loggamma(np.ones((self.N, self.J, self.Mmax)) / tau)
term4 = sp.special.loggamma(np.ones((self.N, self.J, self.Mmax)) / tau + big_d)
term5 = sp.special.loggamma(phi_un_bar / tau)
term6 = sp.special.loggamma((1 - phi_un_bar) / tau)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
big_qun_sig = np.moveaxis(np.repeat([big_qun], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
big_rnus_m = np.moveaxis(np.repeat([rnus], self.Mmax, axis=0),
[0, 1, 2, 3], [3, 0, 1, 2])
big_vmn_sig = np.moveaxis(np.repeat([vmnu], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
Q = (big_qun * vmnu * (bin_coeff + term1 + term2 + term3 - term4 - term5 - term6)).sum() + \
(big_qun_sig * big_rnus_m * big_vmn_sig * (log_xi_sig + log_mu + log_pi + self.lognu_sig)).sum()
return -Q
def compute_alternative_Q(self, qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu):
"""
        this function implements another way to compute Q;
        one can then test
        self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu) == \
        self.compute_alternative_Q(qun, rnus, vmnu, new_xi, new_pi, tau, phi, nu)
"""
log_xi_sig = self.get_log_xi_sig(new_xi)
log_bb_sig = self.get_log_bb_sig(phi, tau)
log_pi = self.get_log_pi_sig(new_pi)
log_mu = np.log(self.mu)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
big_qun_sig = np.moveaxis(np.repeat([big_qun], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
big_rnus_m = np.moveaxis(np.repeat([rnus], self.Mmax, axis=0),
[0, 1, 2, 3], [3, 0, 1, 2])
big_vmn_sig = np.moveaxis(np.repeat([vmnu], self.L, axis=0),
[0, 1, 2], [2, 0, 1])
Q = (big_qun_sig * big_rnus_m * big_vmn_sig *
(log_xi_sig + log_mu + log_pi + log_bb_sig + self.lognu_sig) * self.Mask_sig).sum()
return -Q
def compute_dQ(self, qun, vmnu, tau, phi):
# convention : tau and then phi
dQ = np.zeros(self.J + 1)
# general stuff
big_eta = np.repeat(self.eta.reshape(self.N, -1, self.Mmax), self.J, axis=1)
phi_un_bar = big_eta *\
np.swapaxes(np.tile(phi, (self.N, self.Mmax, 1)), 1, 2)
big_b = np.moveaxis(np.tile(self.B, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
big_d = np.moveaxis(np.tile(self.D, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
# compute dQ/d\tau
term1 = sp.special.psi(big_b + phi_un_bar / tau)
term2 = sp.special.psi((1 - phi_un_bar) / tau + big_d - big_b)
term3 = sp.special.psi(np.ones((self.N, self.J, self.Mmax)) / tau)
term4 = sp.special.psi(np.ones((self.N, self.J, self.Mmax)) / tau + big_d)
term5 = sp.special.psi(phi_un_bar / tau)
term6 = sp.special.psi((1 - phi_un_bar) / tau)
dQ[0] = (big_qun * vmnu / (tau**2) * (- phi_un_bar * term1
- (1 - phi_un_bar) * term2
- term3
+ term4
+ phi_un_bar * term5
+ (1 - phi_un_bar) * term6)).sum()
# compute dQ/f\phi_u
factor = big_qun * vmnu * big_eta / tau
dQ[1:] = (factor * (term1 - term2 - term5 + term6)).sum(axis=0).sum(axis=1)
return -dQ
def compute_alternative_dQ(self, qun, vmnu, tau, phi):
"""
        this function implements another way to compute dQ;
        one can then test
        self.compute_dQ(qun, vmnu, tau, phi)[1:] == \
        self.compute_alternative_dQ(qun, vmnu, tau, phi)[1:]
        # only the dQ/d\phi entries match directly, since this version
        # parameterizes the dispersion with 1/\tau, so dQ/d\tau differs
"""
dQ = np.zeros(self.J + 1)
big_eta = np.repeat(self.eta.reshape(self.N, -1, self.Mmax),
self.J, axis=1)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
for mut in range(self.N):
bn = np.moveaxis(np.tile(np.arange(self.B[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
dn = np.moveaxis(np.tile(np.arange(self.D[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
phi_big1 = np.swapaxes(np.tile(phi, (int(self.B[mut]), self.Mmax, 1)),
1, 2)
to_add_phi_1 = (big_eta[mut, :, :] /
(big_eta[mut, :, :] * phi_big1 + bn * tau)).sum(axis=0)
bndn = np.moveaxis(np.tile(np.arange(self.D[mut] - self.B[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
phi_big2 = np.swapaxes(np.tile(phi, (int(self.D[mut] - self.B[mut]),
self.Mmax, 1)), 1, 2)
to_add_phi_2 = (-big_eta[mut, :, :] /
(1 - phi_big2 * big_eta[mut, :, :] + bndn * tau))\
.sum(axis=0)
dQ[1:] += (big_qun[mut, :, :] * vmnu[mut, :, :] *
(to_add_phi_1 + to_add_phi_2)).sum(axis=1)
to_add_tau_1 = (bn / (big_eta[mut, :, :] * phi_big1 + bn * tau))\
.sum(axis=0)
to_add_tau_2 = (bndn /
(1 - phi_big2 * big_eta[mut, :, :] + bndn * tau))\
.sum(axis=0)
to_add_tau_3 = (dn / (1 + dn * tau)).sum(axis=0)
dQ[0] += (big_qun[mut, :, :] * vmnu[mut, :, :] *
(to_add_tau_1 + to_add_tau_2 - to_add_tau_3)).sum()
return -dQ
def compute_dQ2(self, qun, vmnu, tau, phi):
dQ2 = np.zeros((self.J + 1, self.J + 1))
big_eta = np.repeat(self.eta.reshape(self.N, -1, self.Mmax),
self.J, axis=1)
phi_un_bar = big_eta *\
np.swapaxes(np.tile(phi, (self.N, self.Mmax, 1)), 1, 2)
big_b = np.moveaxis(np.tile(self.B, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
big_d = np.moveaxis(np.tile(self.D, (self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
term1_0 = sp.special.psi(big_b + phi_un_bar / tau)
term2_0 = sp.special.psi((1 - phi_un_bar) / tau + big_d - big_b)
term3_0 = sp.special.psi(np.ones((self.N, self.J, self.Mmax)) / tau)
term4_0 = sp.special.psi(np.ones((self.N, self.J, self.Mmax)) / tau + big_d)
term5_0 = sp.special.psi(phi_un_bar / tau)
term6_0 = sp.special.psi((1 - phi_un_bar) / tau)
term1_1 = sp.special.polygamma(1, big_b + phi_un_bar / tau)
term2_1 = sp.special.polygamma(1, (1 - phi_un_bar) / tau + big_d - big_b)
term3_1 = sp.special.polygamma(1, np.ones((self.N, self.J, self.Mmax)) / tau)
term4_1 = sp.special.polygamma(1, np.ones((self.N, self.J, self.Mmax)) / tau + big_d)
term5_1 = sp.special.polygamma(1, phi_un_bar / tau)
term6_1 = sp.special.polygamma(1, (1 - phi_un_bar) / tau)
u_prime_v = (2 / (tau**3)) * (phi_un_bar * term1_0
+ (1 - phi_un_bar) * term2_0
+ term3_0
- term4_0
- phi_un_bar * term5_0
- (1 - phi_un_bar) * term6_0)
v_prime_u = (phi_un_bar**2 / tau**4 * term1_1
+ (1 - phi_un_bar)**2 / tau**4 * term2_1
+ term3_1 / tau**4
- term4_1 / tau**4
- phi_un_bar**2 / tau**4 * term5_1
- (1 - phi_un_bar)**2 / tau**4 * term6_1)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
dQ2[0] = (big_qun * vmnu * (u_prime_v + v_prime_u)).sum()
factor = big_qun * vmnu * big_eta / tau**2
dQ2[0, 1:] = dQ2[1:, 0] = (factor * (- term1_0
- phi_un_bar / tau * term1_1
+ term2_0
+ (1 - phi_un_bar) / tau * term2_1
+ term5_0
+ phi_un_bar / tau * term5_1
- term6_0
- (1 - phi_un_bar) / tau * term6_1)).sum(axis=0).sum(axis=1)
dQ2[np.arange(1, self.J+1), np.arange(1, self.J+1)] = \
(big_qun * vmnu * big_eta**2 / tau**2 * (term1_1 + term2_1 - term5_1 - term6_1)).sum(axis=0).sum(axis=1)
return -dQ2
def compute_alternative_dQ2(self, qun, vmnu, tau, phi):
"""
        this function implements another way to compute dQ2;
        one can then test
        self.compute_dQ2(qun, vmnu, tau, phi)[1:, 1:] == \
        self.compute_alternative_dQ2(qun, vmnu, tau, phi)[1:, 1:]
        # only the d2Q/d\phi2 block matches directly, since this version
        # parameterizes the dispersion with 1/\tau
"""
dQ2 = np.zeros((self.J + 1, self.J + 1))
big_eta = np.repeat(self.eta.reshape(self.N, -1, self.Mmax),
self.J, axis=1)
big_qun = np.moveaxis(np.repeat([qun], self.Mmax, axis=0),
[0, 1, 2], [2, 0, 1])
for mut in range(self.N):
bn = np.moveaxis(np.tile(np.arange(self.B[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
dn = np.moveaxis(np.tile(np.arange(self.D[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
phi_big1 = np.swapaxes(np.tile(phi, (int(self.B[mut]), self.Mmax, 1)),
1, 2)
to_add_phi_1 = (big_eta[mut, :, :]**2 / (big_eta[mut, :, :] * phi_big1 + bn * tau)**2).sum(axis=0)
bndn = np.moveaxis(np.tile(np.arange(self.D[mut] - self.B[mut]),
(self.J, self.Mmax, 1)),
[0, 1, 2], [1, 2, 0])
phi_big2 = np.swapaxes(np.tile(phi, (int(self.D[mut] - self.B[mut]), self.Mmax, 1)),
1, 2)
to_add_phi_2 = (big_eta[mut, :, :]**2 / (1 - phi_big2 * big_eta[mut, :, :] + bndn * tau)**2).sum(axis=0)
dQ2[np.arange(1, self.J+1), np.arange(1, self.J+1)] += (big_qun[mut, :, :] * vmnu[mut, :, :] * (- to_add_phi_1 - to_add_phi_2)).sum(axis=1)
to_add_tau_1 = (bn**2 / ((big_eta[mut, :, :] * phi_big1 + bn * tau))**2).sum(axis=0)
to_add_tau_2 = (bndn**2 / ((1 - phi_big2 * big_eta[mut, :, :] + bndn * tau))**2).sum(axis=0)
to_add_tau_3 = (dn**2 / ((1 + dn * tau))**2).sum(axis=0)
dQ2[0, 0] += (big_qun[mut, :, :] * vmnu[mut, :, :] * (- to_add_tau_1 - to_add_tau_2 + to_add_tau_3)).sum()
to_add_phi_tau_1 = (big_eta[mut, :, :] * bn / ((big_eta[mut, :, :] * phi_big1 + bn * tau)**2)).sum(axis=0)
to_add_phi_tau_2 = (big_eta[mut, :, :] * bndn / ((1 - phi_big2 * self.eta[mut] + bndn * tau)**2)).sum(axis=0)
dQ2[1:, 0] += (big_qun[mut, :, :] * vmnu[mut, :, :] * (- to_add_phi_tau_1 + to_add_phi_tau_2)).sum(axis=1)
dQ2[0, 1:] += (big_qun[mut, :, :] * vmnu[mut, :, :] * (- to_add_phi_tau_1 + to_add_phi_tau_2)).sum(axis=1)
return -dQ2
@staticmethod
def _remove_zeros_joint(joint):
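        """Pad exact zeros with a tiny constant and renormalise the joint
        responsibility tensor so that it sums to one for each mutation."""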
pre_joint = joint + mixin_init_parameters.ZERO_PADDING * (joint == 0)
joint_norm = pre_joint / pre_joint.sum(axis=3).sum(axis=2).sum(axis=1)[:, np.newaxis, np.newaxis, np.newaxis]
return joint_norm
@staticmethod
def _get_binded_variables(L, R, x, dQ, epsilon):
B_left = (x <= L + epsilon) & (dQ > 0)
B_right = (x >= R - epsilon) & (dQ < 0)
return B_left | B_right
@staticmethod
def _compute_right_term(x, x_new, B, dQ, dQ2, L, R):
pre_lam = np.linalg.inv(dQ2).dot(dQ)[~B]
left_part = np.sum((dQ[~B]).dot(pre_lam))
right_part = np.sum(((dQ[B]).dot((x - x_new)[B])))
return left_part, right_part
@property
def get_responsabilities(self):
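        """E-step: compute the responsibilities qun (clone membership), vmnu
        (mutation multiplicity) and rnus (signature attribution) of each
        mutation under the current parameters."""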
# compute qun
log_xi_sig = self.get_log_xi_sig(self.xi)
log_bb_sig = self.get_log_bb_sig(self.phi, self.tau)
log_pi = self.get_log_pi_sig(self.pi)
pre_altfinal = np.exp(log_xi_sig + log_bb_sig + log_pi + np.log(self.mu) + self.lognu_sig)
pre_altfinal[pre_altfinal == 0] = 2 * sys.float_info.min
final = pre_altfinal * self.Mask_sig
pre_qun = final.sum(axis=3).sum(axis=2)
row_sums = pre_qun.sum(axis=1)
qun = pre_qun / row_sums[:, np.newaxis]
# compute vmnu
pre_vmnu = final.sum(axis=2)
row_sums = pre_vmnu.sum(axis=2)
vmnu = pre_vmnu / np.expand_dims(row_sums, axis=2)
# compute rnus
pre_rnus = np.exp(log_pi[:, :, :, 0] + np.log(self.mu)[:, :, :, 0])
row_sums = pre_rnus.sum(axis=2)
rnus = pre_rnus / row_sums[:, :, np.newaxis]
return qun, vmnu, rnus
def fit(self, epsilon_em=None, epsilon_newton=10**-6, epsilon_box=10**-6,
beta=0.5, sigma=0.25):
if epsilon_em is None:
epsilon_em = 10**-5 * self.J * self.L
new_theta = self.get_theta * 10000
em = 0
while (np.sqrt(np.sum((new_theta-self.get_theta)**2)) > epsilon_em) and (em < self.maxiter):
if self.verbose:
print(em, self.xi, self.phi, self.tau, np.sqrt(np.sum((new_theta - self.get_theta)**2)))
em = em + 1
new_theta = self.get_theta
###########
# E-phase #
###########
qun, vmnu, rnus = self.get_responsabilities
###########
# M-phase #
###########
new_xi = qun.sum(axis=0) / self.N
pre_new_pi = (rnus*np.rollaxis(np.repeat([qun], self.L, axis=0), 0, 3)).sum(axis=0)/np.rollaxis(np.repeat([qun], self.L, axis=0), 0, 3).sum(axis=0)
new_pi = self._remove_zeros_distrib(pre_new_pi)
L = np.zeros(self.J + 1) + 1e-5
R = np.ones(self.J + 1) - 1e-5
R[0] = 0.5
# newton method
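            # projected Newton with Armijo backtracking: the Newton direction
            # inv(dQ2).dot(dQ) is damped by alpha, the step is projected back
            # onto the box [L, R], and alpha is shrunk by beta until the
            # decrease in Q beats the sigma-scaled Armijo bound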
x_0 = np.concatenate(([self.tau], self.phi))
x = x_0
currentQ = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, x[0], x[1:], self.nu)
newt = 0
while True:
if self.verbose:
print('newt', newt, x, currentQ)
newt = newt + 1
dQ2 = self.compute_dQ2(qun, vmnu, x[0], x[1:])
dQ = self.compute_dQ(qun, vmnu, x[0], x[1:])
if (np.sum(dQ) == 0) & (em == 1):
break
# get epsilon
tmp_new_x = _get_projected(L, R, x - dQ)
eps_k = min(epsilon_box, np.sqrt(np.sum((x - tmp_new_x)**2)))
# get I^{sharp} (binded variables)
B = self._get_binded_variables(L, R, x, dQ, eps_k)
# get D
dQ2[:, B] = dQ2[B, :] = 0
dQ2[B, B] = 1
# get alpha k
m = 1
alpha = 1
x_new = _get_projected(L, R, x - alpha * np.linalg.inv(dQ2).dot(dQ))
new_Q = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, x_new[0], x_new[1:], self.nu)
left_part, right_part = self._compute_right_term(x, x_new, B, dQ, dQ2, L, R)
                # deal with cases where the hessian is indefinite!
if left_part < 0:
eigenvalues = np.linalg.eigvals(dQ2)
if min(eigenvalues) < 0:
to_add_eig = np.abs(min(eigenvalues)) + np.finfo(np.float32).eps
dQ2 = dQ2 + to_add_eig * np.identity(len(x))
x_new = _get_projected(L, R, x - alpha * np.linalg.inv(dQ2).dot(dQ))
new_Q = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, x_new[0], x_new[1:], self.nu)
left_part, right_part = \
self._compute_right_term(x, x_new, B, dQ, dQ2, L, R)
# stopping criterion
if left_part + right_part < epsilon_newton:
break
# line search
right_term = sigma * (beta * alpha * left_part + right_part)
if self.verbose:
print('linesearch_before', m, currentQ, new_Q, right_term,
x, x_new)
while not ((currentQ - new_Q) >= right_term):
if self.verbose:
print('linesearch', m, currentQ, new_Q,
right_term, x, x_new)
m += 1
alpha = beta * alpha
x_new = _get_projected(L, R, x - alpha * np.linalg.inv(dQ2).dot(dQ))
new_Q = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, x_new[0], x_new[1:], self.nu)
left_part, right_part = self._compute_right_term(x, x_new, B, dQ, dQ2, L, R)
right_term = sigma * (beta * alpha * left_part + right_part)
x = x_new
currentQ = self.compute_Q(qun, rnus, vmnu, new_xi, new_pi, x[0], x[1:], self.nu)
new_tau = x[0]
new_phi = x[1:]
self.xi = new_xi
self.pi = new_pi
self.tau = new_tau
self.phi = new_phi
self.qun = qun
self.rnus = rnus
self.vmnu = vmnu
currentQ = self.compute_F(qun, rnus, vmnu, new_xi, new_pi, x[0], x[1:], self.nu)
if self.save_trace:
self.Fs.append(currentQ)
self.Fs.append(currentQ)
@property
def get_k(self):
return self.J * (self.L - 1 + 2)
@property
def get_k_cn(self):
"""
        k is the number of free parameters of the model;
        this variant exists to test several values against data. The base count is
        self.J * (self.L - 1 + 2):
        (self.L - 1) because pi has one constrained degree of freedom per clone,
        2 for phi and xi,
        -1 because xi lacks a degree of freedom,
        1 on top for tau/rho,
        all multiplied by np.mean(self.C_tumor_major) to account for the extra
        degrees of freedom due to fitting the copy number.
"""
return self.J * (self.L - 1 + 2) * np.mean(self.C_tumor_major)
@property
def get_k_dof_cn(self):
"""
same as get_k_cn but with the degree of freedom of the input signature
matrix instead of the number of signatures.
"""
return self.J * (self.dof - 1 + 2) * np.mean(self.C_tumor_major)
def get_bic(self, dof=False, cn=False):
if not cn:
k = self.get_k
else:
if dof:
k = self.get_k_dof_cn
else:
k = self.get_k_cn
return - k * np.log(self.N) / 2 + self.get_loglikelihood
def get_bic_heuristics(self, dof=True, factor=0.042, cn=False):
"""
the factor is valid for dof=True
        (0.065 for a subset, 0.034 for 47 signatures)
        otherwise, we advise a factor around 0.015 for the 47 signatures
or around 0.040 for a subset of signatures.
"""
if not cn:
k = self.get_k
else:
if dof:
k = self.get_k_dof_cn
else:
k = self.get_k_cn
        return - factor * k * np.log(self.N) + self.get_loglikelihood
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
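        # for a transformation (rot, tn/td), a reflection hkl picks up the
        # phase exp(-2*pi*i * hkl . tn/td); the per-component factors are
        # precomputed so the full phase is multiply.reduce(phase_factors**hkl)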
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
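# Illustrative usage (not part of the generated file): for space group P -1,
# which contains the identity and the inversion, the reflection (1, 2, 3) has
# the symmetry-equivalent reflection (-1, -2, -3):
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls -> [[1, 2, 3], [-1, -2, -3]], phases -> [1.+0.j, 1.+0.j]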
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
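# Note on the centered settings such as 'I 21 21 21' above: the point-group
# operations are listed twice, the second copy with the body-centering
# translation (1/2, 1/2, 1/2) added. The sums are stored exactly as generated,
# not reduced modulo the lattice, which is why denominators like (2, 2, 1)
# appear: trans_num/trans_den = (1/2, 1/2, 1) rather than (1/2, 1/2, 0).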
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
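# In the centrosymmetric groups from 'P m m m' onward, the operations obtained
# by composing with the inversion carry negative numerators, e.g.
# trans_num = [0,-1,-1] with trans_den = [1,2,2] denotes the translation
# (0, -1/2, -1/2); as with the centering translations above, these are left
# unreduced.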
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
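# Note (sketch): the translations above are stored exactly as generated, so
# a centred setting such as C m m m repeats each of its 8 point-group
# operations with the (1/2, 1/2, 0) centring vector added, and other groups
# in this table carry raw components like -1/2 or 5/4 that are equivalent
# modulo the lattice.  Assuming N is NumPy, a reduction into [0, 1) is
# (hypothetical helper name):
def _reduce_translation(trans_num, trans_den):
    # e.g. (-1, 2) -> 1/2 and (5, 4) -> 1/4, both modulo full cell vectors
    return (trans_num % trans_den)*1.0/trans_den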
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
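# Note: the ':2' suffix in 'C c c a :2' (and in 'F d d d :2', 'P 4/n :2',
# 'P 42/n :2', 'I 41/a :2' below) denotes origin choice 2 of the
# International Tables, i.e. the setting with the origin on an inversion
# centre.  Both keys registered above refer to the same object, so a lookup
# sketch is:
#   sg = space_groups[68]             # by International Tables number
#   sg = space_groups['C c c a :2']   # by symbol plus origin choice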
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
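# Sketch of a consistency check: an F-centred group such as F m m m lists
# every point-group operation once per centring vector, here 8 rotations
# times the 4 centrings (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0),
# giving the 32 entries above.  Assuming N is NumPy (hypothetical name):
def _count_ops_per_rotation(transformations):
    counts = {}
    for rot, trans_num, trans_den in transformations:
        key = tuple(N.ravel(rot))          # hashable form of the 3x3 matrix
        counts[key] = counts.get(key, 0) + 1
    return counts                          # every value is 4 for F m m m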
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
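# Space group 75 below is the first tetragonal entry in this table; the
# orthorhombic groups end with I m m a above.  The four-fold axis shows up
# as the rotation (x, y, z) -> (-y, x, z).  As everywhere in this module,
# number and symbol map to the same object:
#   assert space_groups[75] is space_groups['P 4']   # sketch, after import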
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
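# Note: P 41 and P 43 form an enantiomorphic pair of screw groups; the only
# difference in the data above is the screw translation along c, 1/4 for
# the 4_1 axis versus 3/4 for 4_3.  A sketch extracting it, assuming the
# SpaceGroup constructor keeps its operation list in a `transformations`
# attribute (an assumption of this note):
#   rot, num, den = space_groups['P 41'].transformations[1]
#   screw_t = num[2]*1.0/den[2]   # 0.25 here, 0.75 for 'P 43'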
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
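# Sketch: P -4 contains roto-inversions such as (x, y, z) -> (y, -x, -z).
# Proper rotations have determinant +1, improper operations -1; a
# dependency-free 3x3 determinant for these integer matrices (hypothetical
# name, assuming N is NumPy):
def _det3(rot):
    r = N.ravel(rot)
    return (r[0]*(r[4]*r[8] - r[5]*r[7])
            - r[1]*(r[3]*r[8] - r[5]*r[6])
            + r[2]*(r[3]*r[7] - r[4]*r[6]))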
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
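# Space group 89: P 4 2 2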
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
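# Space group 90: P 4 21 2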
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
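# Space group 91: P 41 2 2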
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
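# Space group 92: P 41 21 2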
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
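# Space group 93: P 42 2 2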
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
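# Space group 94: P 42 21 2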
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
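# Space group 95: P 43 2 2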
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
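# Space group 96: P 43 21 2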
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
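# Space group 97: I 4 2 2 (body-centred: the 8 point operations below are
# repeated a second time with the centering translation (1/2,1/2,1/2) added)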
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
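# Space group 98: I 41 2 2
# Note: some stored translations here (e.g. num [1,1,5] over den [1,2,4],
# i.e. (1, 1/2, 5/4)) lie outside [0,1). They arise from adding the
# body-centering translation and are equivalent modulo full lattice
# translations; if needed, reduce into the unit cell, e.g.:
#     frac = (num*1.0/den) % 1.0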
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
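# Space group 99: P 4 m m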
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
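# Space group 100: P 4 b m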
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
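# Space group 101: P 42 c m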
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
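# Space group 102: P 42 n m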
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
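# Space group 103: P 4 c c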
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
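# Space group 104: P 4 n c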
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
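# Space group 105: P 42 m c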
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
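# Space group 106: P 42 b c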
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
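# Space group 107: I 4 m m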
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
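# Space group 108: I 4 c m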
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
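# Space group 109: I 41 m d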
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
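# Space group 110: I 41 c d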
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
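# Space group 111: P -4 2 m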
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
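# Space group 112: P -4 2 c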
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
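# Space group 113: P -4 21 m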
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
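# Space group 114: P -4 21 c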
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
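# Space group 115: P -4 m 2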
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
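# Space group 116: P -4 c 2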
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
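# Space group 117: P -4 b 2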
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
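# Space group 118: P -4 n 2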
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
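# Space group 119: I -4 m 2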
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
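# Space group 120: I -4 c 2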
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
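
# No. 121: I -4 2 m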
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
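
# No. 122: I -4 2 d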
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
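
# No. 123: P 4/m m m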
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
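
# No. 124: P 4/m c c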
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
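
# No. 125: P 4/n b m (the ':2' suffix in the symbol denotes origin choice 2)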
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
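
# No. 126: P 4/n n c (origin choice 2)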
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
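
# No. 127: P 4/m b m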
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
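
# No. 128: P 4/m n c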
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
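
# No. 129: P 4/n m m (origin choice 2)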
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
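
# No. 130: P 4/n c c (origin choice 2)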
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
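
# No. 131: P 42/m m c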
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
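
# No. 132: P 42/m c m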
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
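
# No. 133: P 42/n b c (origin choice 2)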
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
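
# No. 134: P 42/n n m (origin choice 2)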
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
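
# No. 135: P 42/m b c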
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
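
# No. 136: P 42/m n m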
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
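
# No. 137: P 42/n m c (origin choice 2)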
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
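
# No. 138: P 42/n c m (origin choice 2)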
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
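
# No. 139: I 4/m m m (body-centred; each operation recurs with the
# (1/2, 1/2, 1/2) centring translation, left unreduced modulo the lattice)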
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
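
# A minimal usage sketch, not part of the generated table: apply one
# symmetry operation, stored above as a (rot, trans_num, trans_den) triple,
# to a point in fractional coordinates. The helper name `apply_symmetry_op`
# is illustrative only, and the sketch assumes the array module `N` used
# throughout this file.
def apply_symmetry_op(transformation, point):
    rot, trans_num, trans_den = transformation
    # rot is a 3x3 integer matrix; the translation vector is the
    # componentwise quotient trans_num / trans_den.
    return N.dot(rot, point) + N.array(trans_num, 'd') / N.array(trans_den, 'd')
# For the identity triple, apply_symmetry_op returns the point unchanged.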
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
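
# The registry is indexed both ways: space_groups[143] and space_groups['P 3']
# refer to the same SpaceGroup object, so lookups work by ITA number or by
# Hermann-Mauguin symbol.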
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
class Particle:
def __init__(self, resource, feature_num, k):
self.resource = resource
self.k = k # features to be selected
if k is not None:
self.feature_selected = np.random.choice(feature_num, self.k, replace=False).tolist()
self.feature_selected.sort()
self.position = indices2binary(self.feature_selected, feature_num)
else:
self.position = np.random.randint(0, 2, size=feature_num)
self.feature_selected = binary2indices(self.position)
self.performance = None
"""
evaluate the fitness/metrics of a particle
"""
def evaluate(self, x, y, clf, metrics="accuracy"):
x_train, x_valid = x
y_train, y_valid = y
indices = self.feature_selected
clf.fit(x_train[:, indices], y_train)
y_pred = clf.predict(x_valid[:, indices])
if metrics == "accuracy":
self.performance = accuracy_score(y_valid, y_pred)
if metrics == "f1":
self.performance = f1_score(y_valid, y_pred)
def mutation(self, global_best, sample_best):
# d = len(self.position)
# c1 = 0.5
# c2 = 1.0 - c1
# r1 = np.random.rand(d)
# r2 = np.random.rand(d)
#
# # update the position
# v = c1 * r1 * (global_best.position - self.position) + \
# c2 * r2 * (sample_best[self.resource].position - self.position)
# s = 1 / (1 + np.exp(-v))
# self.position = np.array([1 if si >= 0.5 else 0 for si in s])
# self.feature_selected = binary2indices(self.position)
        # pair-swap mutation: flipping a complementary (1, 0) bit pair keeps the
        # number of selected features constant
        d = len(self.position)
        m = int(d * 0.1 / 2)
        for _ in range(m):
            bit1, bit2 = np.random.choice(d, 2, replace=False)
            if self.position[bit1] + self.position[bit2] == 1:
                self.position[bit1] = 1 - self.position[bit1]
                self.position[bit2] = 1 - self.position[bit2]
        # keep the index view in sync with the mutated bit string
        self.feature_selected = binary2indices(self.position)
def __lt__(self, other):
return self.performance < other.performance
def __le__(self, other):
return self.performance <= other.performance
def __gt__(self, other):
return self.performance > other.performance
def __ge__(self, other):
return self.performance >= other.performance
def __str__(self):
return "resource = " + str(self.resource) + ", performance = " + str(self.performance)
def crossover(chromosome1, chromosome2, m):
    # m-point crossover: the offspring takes alternating segments from the two
    # parents, switching at m randomly chosen cut points
    assert len(chromosome1) == len(chromosome2)
chromosome1 = list(chromosome1)
chromosome2 = list(chromosome2)
cutting_points = np.random.choice(len(chromosome1), m, replace=False).tolist()
cutting_points.append(len(chromosome1))
cutting_points.sort()
result = []
index = 0
flag = 1
for point in cutting_points:
if flag > 0:
result = result + chromosome1[index:point]
index = point
flag = -1
elif flag < 0:
result = result + chromosome2[index:point]
index = point
flag = 1
return np.array(result, dtype=int)
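
# Hedged example of the crossover above: with m = 2 the offspring is made of
# three alternating parental segments (the exact cut points are random).
def _demo_crossover():
    parent1 = np.ones(8, dtype=int)
    parent2 = np.zeros(8, dtype=int)
    return crossover(parent1, parent2, m=2)  # e.g. array([1, 1, 0, 0, 0, 1, 1, 1])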
def mutation_ga(chromosome, pm, k):
    # bit-flip mutation with asymmetric rates: ones flip with probability pm and
    # zeros with pm * n1 / n0, so the expected number of selected features is preserved
    n1 = sum(chromosome)
    n0 = len(chromosome) - n1
    p1 = pm
    p0 = pm * n1 / n0
for i, g in enumerate(chromosome):
r = np.random.rand()
if g == 1 and r < p1:
chromosome[i] = 0
elif g == 0 and r < p0:
chromosome[i] = 1
if k is not None:
# ensure the number of the selected features to be same
selected_num = sum(chromosome)
if selected_num == k:
return
if selected_num > k:
one_index = [index for index in range(len(chromosome)) if chromosome[index] == 1]
remove_index = np.random.choice(one_index, selected_num - k, replace=False)
chromosome[remove_index] = 0
        if selected_num < k:
            zero_index = [index for index in range(len(chromosome)) if chromosome[index] == 0]
            add_index = np.random.choice(zero_index, k - selected_num, replace=False)
            chromosome[add_index] = 1
def binary2indices(binary):
indices = []
for i, bit in enumerate(binary):
if bit == 1:
indices.append(i)
return indices
def indices2binary(indices, feature_num):
binary = [0] * feature_num
for indice in indices:
binary[indice] = 1
return np.array(binary, dtype=int)
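
# The two helpers above are inverses of each other, e.g.
#   indices2binary([1, 3], 5)                  -> array([0, 1, 0, 1, 0])
#   binary2indices(np.array([0, 1, 0, 1, 0]))  -> [1, 3]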
def local_mutation(particle):
position = list(particle.position)
d = len(position)
m = int(d * 0.1 / 2)
for _ in range(m):
bit1, bit2 = np.random.choice(d, 2, replace=False)
if position[bit1] + position[bit2] == 1:
position[bit1] = 1 - position[bit1]
position[bit2] = 1 - position[bit2]
p = Particle(particle.resource, d, particle.k)
    p.position = np.array(position)
    p.feature_selected = binary2indices(p.position)
    return p
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Model ocean waves with a kayak sitting on top.
Created on Sat Sep 4 15:59:49 2021
@author: jimlux
"""
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
Earthg = 9.8
""" define swells
period in seconds
height in feet (because that's what the reports give)
direction is "from" in degrees """
swells = [{"period":5,"height":.9,"direction":250},
{"period":13,"height":.8,"direction":210},
{"period":16,"height":.9,"direction":215}]
nswells = len(swells)
""" calculate wavelength (lambda) and speed
and convert height to meters, and unipolar
and convert direction to radians"""
for swell in swells:
swell["speed"] = Earthg/(2. * np.pi) *swell["period"]
swell["wavelength"] = swell["speed"] *swell["period"]
swell["height"] = swell["height"]/3.28/2
swell["direction"] = (90-swell["direction"])*np.pi/180.
gridspacing = 2.0
gridsize = 400
xrange = np.arange(0,gridsize*gridspacing,gridspacing)
yrange = np.arange(0,gridsize*gridspacing,gridspacing)
X,Y = np.meshgrid(xrange,yrange)
phases = np.zeros((gridsize,gridsize,nswells))
for idx,swell in enumerate(swells):
swellcos = np.cos(swell["direction"])
swellsin = np.sin(swell["direction"])
wavelength = swell["wavelength"]
print(wavelength)
""" there's got to be a better way to do this """
dswell1 = X*swellcos + Y*swellsin
phases[:,:,idx]=np.pi*2.0 * dswell1/wavelength
"""for i in range(gridsize):
for j in range(gridsize):
dx = i*gridspacing
dy = j*gridspacing
dswell = dx * swellcos + dy*swellsin
phases[i,j,idx]=np.pi * 2 * dswell/wavelength
"""
#fig,ax = plt.subplots(subplot_kw={"projection": "3d"})
#plt.figure()
#plt.title("swell #%d"%idx)
#surf = ax.plot_surface(xrange, yrange, phases[:,:,idx], cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
#seaheight = np.cos(phases[:,:,idx])
#img = plt.imshow(seaheight)
makefigs = True
for t in np.arange(0, 60, step=1):
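    # The source file is truncated here; the body below is a hedged sketch of
    # one animation step, assuming the loop sums the swell components at time t
    # (each swell's phase advances by 2*pi*t/period) and renders the surface.
    seaheight = np.zeros((gridsize, gridsize))
    for idx, swell in enumerate(swells):
        seaheight += swell["height"] * np.cos(phases[:, :, idx] - 2.0 * np.pi * t / swell["period"])
    if makefigs:
        plt.imshow(seaheight, cmap=cm.coolwarm)
        plt.title("sea surface at t = %.0f s" % t)
        plt.pause(0.05)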
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/2/19 10:38
# @Author : xiaorun
# @Site :
# @File : stereo.py
# @Software: PyCharm
import cv2
import numpy as np
##################### Extrinsic parameters of the left and right cameras #####################
########### Left camera: world coordinates and corresponding image coordinates
object_3d_points_L = np.array(([-1300, 1200, 0],
[1400, 1360, 0],
[500, 2100, 0],
[-700, 2400, 0],
[-300,3000,0],
[1300,3100,0],
[-800,3350,0],
[-1200,4260,0],
[100,4650,0],
[1350,4920,0]),dtype=np.double)
object_2d_point_L= np.array(([535, 662],
[1038, 594],
[839, 494],
[641, 471],
[694,403],
[909,383],
[619,373],
[567,304],
[718,273],
[847,250]),dtype=np.double)
########## End of left camera world/image point pairs
########### Right camera: world coordinates and corresponding image coordinates
object_3d_points_R= np.array(([-1300, 1200, 0],
[1400, 1360, 0],
[500, 2100, 0],
[-700, 2400, 0],
[-300,3000,0],
[1300,3100,0],
[-800,3350,0],
[-1200,4260,0],
[100,4650,0],
[1350,4920,0]),dtype=np.double)
object_2d_point_R= np.array(([271, 619],
[792, 693],
[629, 504],
[449, 457],
[529, 397],
[758,400],
[470,361],
[451,289],
[612,270],
[756,259]),dtype=np.double)
########## End of right camera world/image point pairs
########################################################################
################# Intrinsic parameters of the left and right cameras ###############################
camera_matrix_L = np.array(([548.2646, 0, 666.9979],
[0, 549.5398, 494.3196],
[0, 0, 1.0]), dtype=np.double)
dist_coefs_L = np.array([0.0119, -0.0018, 0, 0], dtype=np.double)
rotM_L=np.array(([ 0.99647804, -0.0835722, 0.00687046],
[-0.06215961, -0.7911815, -0.60841435],
[ 0.05628231, 0.60584447, -0.79358981]),dtype=np.double)
tvec_L=np.array(([ 734.59639027],
[ 1704.06440486],
[ 2086.22518369]),dtype=np.double)
RT_leftcamera=np.hstack((rotM_L,tvec_L))
m_left = np.matmul(camera_matrix_L, RT_leftcamera)  # left camera projection (M) matrix
camera_matrix_R=np.array(([554.9156,0,635.8487],
[0,555.5995,509.9433],
[0,0,1.0]),dtype=np.double)
dist_coefs_R=np.array([0.0256,-0.0133,0,0],dtype=np.double)
rotM_R=np.array(([ 0.99353492, 0.1123029, 0.01662595],
[ 0.10227696, -0.82186833, -0.56042115],
[-0.04927258, 0.55849843, -0.82804089]),dtype=np.double)
tvec_R=np.array(([ -773.49701673],
[ 1739.81170979],
[ 2156.35612044]),dtype=np.double)
RT_rightcamera=np.hstack((rotM_R,tvec_R))
# Right camera projection (M) matrix.
# Triangulation model, with P = [X, Y, Z, 1]^T the homogeneous world point:
#   Z1 * [u1, v1, 1]^T = Ml * P    (left image)
#   Z2 * [u2, v2, 1]^T = Mr * P    (right image)
m_right = np.matmul(camera_matrix_R, RT_rightcamera)
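
# Hedged sketch (not part of the original script): with both projection
# matrices known, a matched pixel pair can be triangulated by stacking the
# projection equations above and solving with least squares (DLT). Any pixel
# coordinates passed in a demo call would be illustrative placeholders.
def triangulate(m_l, m_r, uv_left, uv_right):
    u1, v1 = uv_left
    u2, v2 = uv_right
    # each image contributes two linear equations in (X, Y, Z):
    #   (u*m3 - m1) . [X, Y, Z] = m1[3] - u*m3[3]   (and likewise for v)
    A = np.array([u1 * m_l[2, :3] - m_l[0, :3],
                  v1 * m_l[2, :3] - m_l[1, :3],
                  u2 * m_r[2, :3] - m_r[0, :3],
                  v2 * m_r[2, :3] - m_r[1, :3]])
    b = np.array([m_l[0, 3] - u1 * m_l[2, 3],
                  m_l[1, 3] - v1 * m_l[2, 3],
                  m_r[0, 3] - u2 * m_r[2, 3],
                  m_r[1, 3] - v2 * m_r[2, 3]])
    xyz, *_ = np.linalg.lstsq(A, b, rcond=None)
    return xyz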
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # 3-second polling interval, reported in minutes
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching files are returned as a list for later download and collection.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF data files in the catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
        url = url[25:]  # strip the leading host/catalog prefix, keeping the dataset path
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
    tmp = variables[0].data / 60 / 60 / 24  # seconds since 1900 converted to days
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
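
# Hedged sketch of the intended call sequence (the dates and regex tag below
# are illustrative placeholders; var_list comes from M2M_URLs further down):
#   data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, tag='.*METBK.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)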
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
	var_list[8].units = 'percent'
	var_list[9].units = 'W/m^2'
	var_list[10].units = 'W/m^2'
	var_list[11].units = 'mm'
	var_list[12].units = 'W/m^2'
	var_list[13].units = 'W/m^2'
	var_list[14].units = 'W/m^2'
	var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
	var_list[8].units = 'percent'
	var_list[9].units = 'W/m^2'
	var_list[10].units = 'W/m^2'
	var_list[11].units = 'mm'
	var_list[12].units = 'W/m^2'
	var_list[13].units = 'W/m^2'
	var_list[14].units = 'W/m^2'
	var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
	var_list[8].units = 'percent'
	var_list[9].units = 'W/m^2'
	var_list[10].units = 'W/m^2'
	var_list[11].units = 'mm'
	var_list[12].units = 'W/m^2'
	var_list[13].units = 'W/m^2'
	var_list[14].units = 'W/m^2'
	var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
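# FLORT: three-wavelength fluorometer reporting seawater scattering,
# chlorophyll-a, CDOM, and optical backscatter; the CE09OSPM profiler branch
# also carries the interpolated CTD pressure so samples can be placed in the
# water column.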
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
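# FDCHP: direct-covariance flux package; only the timestamp is requested for
# the telemetered stream here.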
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
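# DOSTA: dissolved oxygen optode. The NSIF/MFN branches differ in stream name
# and in whether optode temperature and raw concentrations are exposed; the
# CE09OSPM profiler instead uses the DOFST (SBE 43F) sensor, whose raw output
# is a frequency, hence the 'Hz' units in that branch.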
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
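# ADCP: acoustic Doppler current profiler. Each branch returns earth-referenced
# velocity profiles (east/north/up) per depth bin plus instrument attitude
# (heading, pitch, roll).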
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
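# ZPLSC: bio-acoustic sonar for zooplankton backscatter; only the timestamp is
# wired up here, for both telemetered and recovered-host streams.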
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
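# WAVSS: surface wave statistics from the buoy (wave heights, periods, mean
# direction, and directional spread).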
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
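# VELPT: single-point velocity meter (Nortek Aquadopp). Heading/pitch/roll,
# temperature, and pressure are reported in the instrument's raw scaled units,
# hence the deci-degree, 0.01degC, and 0.001dbar unit strings below.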
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
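# PCO2W: seawater pCO2 sensor with its thermistor temperature; the same
# three-variable template is used across NSIF and MFN nodes.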
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
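# PHSEN: seawater pH sensor with its thermistor temperature; pH itself is
# dimensionless, hence 'unitless'.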
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
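# Multispectral radiometer: the downwelling vector is a per-wavelength array
# of spectral irradiance (uW cm-2 nm-1).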
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
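# Seafloor pressure (tide) recorder, deployed only on the MFN nodes; returns
# absolute seafloor pressure and in-situ seawater temperature.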
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
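# Pumped CTD: every CTDBP entry exposes the same five variables
# (temperature, practical salinity, density, pressure, conductivity).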
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
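# Point velocimeter on the MFN nodes: turbulent velocity components in the
# eastward/northward/upward directions, plus seawater pressure.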
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001 dbar'
#VEL3DK
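# Profiler-mounted point velocity meter on the wire-following profiler; also
# reports attitude (heading/pitch/roll) and the co-located CTD pressure.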
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
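# Air-sea pCO2 sensor on the surface buoy: partial pressure of CO2 in surface
# seawater and in the overlying atmosphere, plus the derived air-sea CO2 flux.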
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
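# Photosynthetically active radiation (PAR) sensor on the profiler, with the
# co-located CTD pressure for depth registration.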
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
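# Absorption/attenuation spectrophotometer: only the time coordinate is
# mapped here; the spectral channels are not enumerated in this lookup.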
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
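# SUNA nitrate sensor: raw and salinity-corrected nitrate concentration.
# The stream name ends in 'suna_dcl_recovered' even for the telemetered
# method; that is the stream name as delivered by uFrame.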
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
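# 3-axis motion package on the surface buoy: only the time coordinate is
# mapped here.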
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
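# Bulk meteorology package on the surface buoy: sea surface temperature,
# conductivity and salinity, magnetically corrected wind components,
# barometric pressure, air temperature, relative humidity, long- and
# shortwave irradiance, precipitation, minute flux products, surface
# currents, and specific humidity.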
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
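# Three-channel fluorometer: chlorophyll-a fluorescence, CDOM, optical
# backscatter, and the associated scattering coefficients.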
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
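# Direct covariance flux package: only the time coordinate is mapped here.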
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
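# Dissolved oxygen optode: NSIF entries expose four data variables (including
# the raw optode concentration and temperature); MFN entries expose two.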
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
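# Acoustic Doppler current profiler, earth-coordinate velocities: bin depths,
# instrument attitude (heading/pitch/roll), and eastward/northward/upward
# seawater velocity.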
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
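# WAVSS: surface wave statistics from the buoy-mounted wave sensors. Each branch
# maps fifteen variables: time plus bulk wave statistics (heights in m, periods
# in sec, mean direction/spread in degrees, and zero-crossing counts).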
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
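# VELPT: single-point velocity meters. Branches map time, the three velocity
# components (m/s), attitude in deci-degrees, temperature in 0.01degC, and
# pressure in 0.001dbar (mbar), matching the raw instrument scaling.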
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# NOTE: this BUOY branch reuses the RID16 (NSIF) sensor designator; the dcl
# stream variant is kept commented out below in favor of the plain
# instrument_recovered stream.
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
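# PCO2W: in-water pCO2 sensors (NSIF and MFN nodes only). Three variables per
# branch: time, the instrument thermistor temperature (degC), and the pCO2 of
# seawater (uatm).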
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
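# PHSEN: seawater pH sensors. Branches map time, thermistor temperature (degC),
# and seawater pH (stored with 'unitless' units).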
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
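# SPKIR: multispectral downwelling irradiance. Only two variables are mapped:
# time and the downwelling irradiance vector in uW cm-2 nm-1 (a vector
# quantity, one value per wavelength channel).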
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
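# PRESF: seafloor pressure (tide) sensors on the MFN frames. Branches map time,
# absolute seafloor pressure (dbar), and seawater temperature (degC).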
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
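# CTDBP: pumped CTDs. Six variables per branch: time, temperature (degC),
# practical salinity (unitless), density (kg/m3), pressure (dbar), and
# conductivity (S/m).
# Hypothetical downstream sketch (names assumed, not part of this module;
# .data arrays are filled only after a data request):
#   sigma = var_list[3].data - 1000.0  # density anomaly from density in kg/m3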
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
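# VEL3D: point turbulence velocimeters on the MFN frames. Branches map time,
# the three turbulent velocity components (m/s), and seawater pressure in
# 0.001dbar (mbar).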
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
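# PCO2A: air-sea pCO2 systems on the surface buoys. Branches map time, seawater
# and atmospheric pCO2 (uatm), and the derived air-sea CO2 flux (mol m-2 s-1).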
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
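# OPTAA: spectrophotometers (absorption/attenuation). Only the time coordinate
# is mapped in these branches; the multispectral optical arrays are not
# populated by this routine.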
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate concentration (SUNA), recovered-host streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
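#CTD - conductivity, temperature, depth (plus derived salinity and density), recovered-instrument streams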
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
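#ADCP - acoustic Doppler current profiler velocity profiles, recovered-instrument streams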
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
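#ZPLSC - bio-acoustic sonar; these streams request time stamps only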
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
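#VELPT - single-point velocity meter, recovered-instrument streams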
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
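#VEL3D - 3-D point velocity (turbulent velocity components)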
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
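#PRESF - seafloor pressure (tide) measurements, recovered-instrument streams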
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
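#PHSEN - seawater pH, recovered-instrument streams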
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
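#PCO2W - partial pressure of CO2 in seawater, recovered-instrument streams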
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
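#PARAD - photosynthetically available radiation (PAR), recovered profiler stream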
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
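#NUTNR - nitrate concentration (SUNA), recovered-instrument streams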
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
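#FDCHP - direct covariance flux package; this stream requests time stamps only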
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
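#FLORT - fluorometer (chlorophyll-a, CDOM, optical backscatter)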
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
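#CE09OSPM recovered wire-following profiler (WFP) data below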
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
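#Recovered-instrument DOSTA below: oxygen is reported through the co-located CTDBP recovered streams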
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
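#WAVSS_Stats on the ISSM MFNs maps to the ADCPT-M wave statistics (log9) stream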
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
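#Cabled benthic experiment package (BEP) streamed data below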
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
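#Streamed BEP DOSTA oxygen is sourced from the co-located CTDBP data stream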
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
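#Coastal glider (CE05MOAS) data below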
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
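# ---------------------------------------------------------------------------
# CE05MOAS glider DOSTA (dissolved oxygen) streams.
# Each branch follows the same pattern as the CTD branches above: select the
# uFrame stream for the platform/method, then reset the var_list entries
# (name / data / units). In these branches, telemetered data come from the
# 'dosta_abcdjm_glider_instrument' stream and recovered-host data from
# 'dosta_abcdjm_glider_recovered'.
# ---------------------------------------------------------------------------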
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
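# ---------------------------------------------------------------------------
# CE05MOAS glider FLORT (fluorometer) streams: seawater scattering
# coefficient, chlorophyll, CDOM, and optical backscatter, plus the
# interpolated CTD pressure and glider position. Both the telemetered and
# recovered-host methods read from the 'flort_m_sample' stream.
# ---------------------------------------------------------------------------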
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
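    # ------------------------------------------------------------------
    # CE05MOAS glider PARAD (photosynthetically active radiation)
    # branches: five variables (time, PAR, interpolated CTD pressure,
    # lat, lon). The telemetered stream is parad_m_glider_instrument;
    # the recovered-host stream is parad_m_glider_recovered.
    # ------------------------------------------------------------------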
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
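    # ------------------------------------------------------------------
    # CE05MOAS glider ADCP branches (only RecoveredHost entries are
    # defined in this lookup; no telemetered glider-ADCP branch appears
    # here): eleven variables covering bin depths, attitude
    # (heading/pitch/roll in deci-degrees), the three seawater velocity
    # components, interpolated CTD pressure, and position.
    # ------------------------------------------------------------------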
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
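    # ------------------------------------------------------------------
    # Surface-mooring bulk meteorology, hourly flux products
    # (METBK1-hr): seventeen variables. Note the time axis here is the
    # flux timestamp 'met_timeflx' rather than 'time', and the units
    # mix fluxes (W/m2, N/m2), rain rate (mm/hr), specific humidity
    # (g/kg), temperatures (degC), and 10-m wind speed (m/s).
    # ------------------------------------------------------------------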
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
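    # ------------------------------------------------------------------
    # Surface-mooring wave spectra, mean directional products
    # (WAVSS_MeanDir): twelve variables, including the per-frequency-band
    # arrays (psd_mean_directional, mean_direction_array,
    # directional_spread_array) and the declination-corrected wave
    # directions, which carry 'deg' rather than 'degrees' unit strings.
    # ------------------------------------------------------------------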
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
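#WAVSS non-directional wave spectra streams (omnidirectional PSD only)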
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
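#WAVSS buoy motion streams (heave/north/east displacement time series)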
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
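#WAVSS directional Fourier coefficient streams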
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
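#Coastal Endurance Oregon Offshore profiler streams (CE04OSPS shallow
#profiler and CE04OSPD deep profiler)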
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
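#the shallow profiler SBE43 oxygen is reported through the co-located CTD
#stream, so the CTDPF dataset is requested here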
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
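#CE04OSPS 200 m platform (PC01B) streams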
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
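#Each branch above follows the same four-step pattern: set the uFrame dataset
#path, the variable names, empty placeholder data arrays, and the unit strings.
#A table-driven lookup keyed on (platform_name, node, instrument_class, method)
#could replace this elif chain; sketched here as a comment only, assuming the
#key tuples stay unique:
#  STREAMS[('CE04OSPS', 'PLATFORM200M', 'PCO2W', 'Streamed')] = (
#      'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record',
#      ['time', 'pco2w_thermistor_temperature', 'pco2_seawater'],
#      ['seconds since 1900-01-01', 'degC', 'uatm'])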
#Coastal Pioneer CSM Data Streams
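#METBK1 and METBK2 refer to the two bulk meteorology packages on the surface
#buoy (SBD11 and SBD12, respectively); both report the same variable set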
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#WAVSS
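# The WAVSS (surface wave spectra) blocks come in five stream flavors, each
# with Telemetered and RecoveredHost variants: bulk statistics (WAVSS_Stats),
# mean directional spectra (WAVSS_MeanDir), non-directional spectra
# (WAVSS_NonDir), buoy motion time series (WAVSS_Motion), and directional
# Fourier coefficients (WAVSS_Fourier).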
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (Telemetered)
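# PCO2A blocks register three science variables alongside time: seawater and
# atmospheric pCO2 (both in uatm) and the derived air-sea CO2 flux
# (mol m-2 s-1).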
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (RecoveredHost)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
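# FDCHP (direct covariance flux) blocks currently register only the time
# coordinate; no science variables are mapped for this instrument here.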
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
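# The METBK1-hr / METBK2-hr blocks below map the hourly metbk_hourly stream,
# which carries derived bulk flux products (heat, latent, sensible, momentum,
# freshwater, rain) and reduced state variables (2 m humidity/temperature,
# 10 m wind) rather than the minute-resolution measurements registered above.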
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
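# CTD blocks follow. As a minimal sketch (assumption: the request resolves to
# a netCDF file served via THREDDS; 'thredds_url' is a hypothetical handle
# derived from uframe_dataset_name, not something defined in this function),
# a caller could fill the empty var_list placeholders like so:
#
#   from netCDF4 import Dataset
#   ds = Dataset(thredds_url)                 # open the remote dataset
#   for var in var_list:
#       if var.name in ds.variables:          # copy each registered variable
#           var.data = ds.variables[var.name][:]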
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
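# OPTAA (optical absorption/attenuation) blocks register only the time
# coordinate; the spectral absorption and attenuation arrays are not mapped
# in this function.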
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
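# VELPT (point velocity meter) blocks expose the instrument's raw integer
# conventions directly: heading/pitch/roll in deci-degrees, temperature in
# hundredths of a degree C ('0.01degC'), and pressure in '0.001dbar' counts.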
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
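# FLORT fluorometers on the NSIF: scattering coefficient, chlorophyll-a, CDOM,
# total volume scattering, and optical backscatter.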
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
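# SPKIR spectral irradiance sensors on the NSIF: downwelling irradiance vector (uW cm-2 nm-1).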
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
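# DOSTA dissolved oxygen optodes on the NSIF: oxygen concentrations and optode temperature.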
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
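# PHSEN seawater pH sensors on the NSIF and MFN: thermistor temperature and pH.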
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
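# PCO2W seawater pCO2 sensors on the MFN: thermistor temperature and pCO2 (uatm).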
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
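# PRESF seafloor pressure (tide) sensors on the MFN: absolute pressure and seawater temperature.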
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
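# VELPT single-point velocity meters on the MFN (multi-function node):
# same variable set as the NSIF VELPTs above.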
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
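# DOSTA dissolved oxygen optodes on the MFN.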
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
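# ZPLSC bioacoustic sonar on the MFN: only the time coordinate is requested here.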
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
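# MFN bottom-mounted ADCP earth-referenced velocity streams (bin depths,
# attitude, and east/north/up seawater velocities).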
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
# Coastal Pioneer Wire-Following Profilers (WFP)
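# Each profiler branch below follows the same template: set the uFrame stream
# path for the platform/node/instrument_class/method combination, then
# pre-declare the variable names, empty data arrays, and units that a later
# request step fills in. A minimal illustration with hypothetical inputs,
# assuming var_list is the list of variable records defined earlier in this
# function:
#   platform_name, node = 'CP04OSPM', 'PROFILER'
#   instrument_class, method = 'CTD', 'Telemetered'
#   # -> uframe_dataset_name =
#   #    'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'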
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
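# CP01CNPM wire-following profiler mooring (buoy MOPAK plus profiler
# FLORT/DOSTA/VEL3D/CTD/PARAD streams).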
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
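# CP02PMCI wire-following profiler mooring.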
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
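# CP02PMCO wire-following profiler mooring.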
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
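# CP02PMUI wire-following profiler mooring.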
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
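# CP02PMUO wire-following profiler mooring.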
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
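# ------------------------------------------------------------------
# Pattern note: every branch in this chain fills the same structure --
# the uFrame dataset path, then var_list[i].name / .data / .units per
# variable, with each .data initialized to an empty np.array() for the
# caller to populate once the uFrame request returns. The attitude
# unit strings appear to be shorthand: 'ddegrees' for deci-degrees
# (VEL3D heading/pitch/roll) and 'cdegree' for centi-degrees
# (adcps_jln heading/pitch/roll).
#
# Hypothetical usage sketch (the wrapper name below is illustrative,
# not part of this module):
#   var_list = build_var_list('CP03ISPM', 'PROFILER', 'CTD',
#                             'Telemetered')
#   temperature = var_list[1].data   # degC
#   pressure    = var_list[4].data   # dbar
#
# --- CP03ISPM streams: buoy (SBS01) MOPAK plus profiler (WFP01)
# --- FLORT, DOSTA, VEL3D, CTD, PARAD ---
# ------------------------------------------------------------------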
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
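# ------------------------------------------------------------------
# Riser (RII01) ADCP streams for the profiler moorings. Each platform
# gets three branches: recovered_inst (adcp_velocity_earth, attitude
# in 'deci-degrees') plus recovered_host and telemetered
# (adcps_jln_stc streams, attitude in 'cdegree').
# --- CP04OSPM riser ADCP ---
# ------------------------------------------------------------------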
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
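# --- CP01CNPM riser ADCP (recovered_inst / recovered_host / telemetered) ---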
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
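# --- CP02PMCI riser ADCP ---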
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
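# --- CP02PMCO riser ADCP ---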
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMCO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
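# --- CP02PMUI riser ADCP ---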
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUI' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
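# --- CP02PMUO riser ADCP ---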
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP02PMUO' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/RII01/02-ADCPSL010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
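# --- CP03ISPM riser ADCP ---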
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/recovered_host/adcps_jln_stc_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISPM' and node == 'RISER' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/RII01/02-ADCPTG010/telemetered/adcps_jln_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'adcps_jln_heading'
var_list[3].name = 'adcps_jln_pitch'
var_list[4].name = 'adcps_jln_roll'
var_list[5].name = 'adcps_jln_eastward_seawater_velocity2'
var_list[6].name = 'adcps_jln_northward_seawater_velocity2'
var_list[7].name = 'adcps_jln_upward_seawater_velocity2'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'cdegree'
var_list[3].units = 'cdegree'
var_list[4].units = 'cdegree'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
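# ------------------------------------------------------------------
# Coastal glider streams: the 'CPGL###' platform aliases below map to
# CP05MOAS (mobile assets) reference designators, e.g.
# 'CPGL336' -> 'CP05MOAS/GL336/...'.
# --- CPGL336 glider streams (CTD, DOSTA, FLORT, PARAD, ADCP) ---
# ------------------------------------------------------------------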
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL336' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL336/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
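# --- CPGL388 glider streams (CTD, DOSTA, FLORT, PARAD, ADCP) ---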
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL388' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL388/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
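# --- CPGL335 glider streams (CTD, DOSTA, FLORT, PARAD, ADCP) ---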
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL335' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL335/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
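# --- CPGL339 glider streams ---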
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL339' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL339/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
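# CP05MOAS/GL340 glider: same CTD/DOSTA/FLORT/PARAD/ADCP stream template as GL339.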
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL340' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL340/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
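# CP05MOAS/GL374 glider stream mappings (CTD, DOSTA, FLORT, PARAD, ADCP).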
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL374' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL374/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
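# CP05MOAS/GL375 glider stream mappings (CTD, DOSTA, FLORT, PARAD, ADCP).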
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL375' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL375/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
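# CP05MOAS/GL376 glider stream mappings (CTD, DOSTA, FLORT, PARAD, ADCP).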
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL376' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL376/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
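# CP05MOAS/GL379 glider stream mappings (CTD, DOSTA, FLORT, PARAD, ADCP).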
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL379/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL379' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL379/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
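# CP05MOAS/GL380 glider stream mappings (CTD, DOSTA, FLORT, PARAD, ADCP).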
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL380/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL380' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL380/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL387/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL387' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL387/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL389/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL389/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL389/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL389/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL389/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL389/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL389/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL389/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL389' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL389/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/03-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/03-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP05MOAS/GL514/05-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/05-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CPGL514' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP05MOAS/GL514/01-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
from functools import reduce
from math import exp, isclose, log, pi
from os import makedirs, path
import matplotlib.pyplot as plt
import numpy as np
from scipy import special
working_dir = path.dirname(path.abspath(__file__))
makedirs(path.join(working_dir, 'plots'), exist_ok=True)
try:
data = np.load(path.join(working_dir, 'data.npy'))
except FileNotFoundError:
data = np.load(path.join(working_dir, 'task4.npy'))
def hist(x_array, n_bins, continuous=True, normalize=True):
min_val = x_array.min()
max_val = x_array.max()
count = np.zeros(int(n_bins))
for x in x_array:
bin_number = int((n_bins - 1) * ((x - min_val) / (max_val - min_val)))
count[bin_number] += 1
# normalize the distribution
if normalize:
count /= x_array.shape[0]
if continuous:
count /= ((max_val - min_val) / n_bins)
return count, np.linspace(min_val, max_val, num=n_bins)
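# Note: the second return value is the vector of n_bins evenly spaced bin
# positions (via np.linspace), used below as the x-coordinates for plt.bar.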
num_bins = 100
counts, bins = hist(data, num_bins, continuous=False, normalize=False)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(path.join(working_dir, 'plots/hist.eps'), bbox_inches='tight')
plt.close()
counts, bins = hist(data, num_bins, continuous=False, normalize=True)
plt.bar(bins, counts, width=0.5, align='edge', color='gray')
plt.xlabel('x')
plt.ylabel(r'$P\left(x\right)$')
plt.savefig(
path.join(working_dir, 'plots/hist_normalized.eps'), bbox_inches='tight'
)
def poisson_likelihood(x, lambda_):
n = x.shape[0]
lambda_x = reduce(
lambda y, z: y * z, (lambda_ ** x).tolist()
)
x_factorial = reduce(
lambda y, z: y * z, special.factorial(x, exact=True).tolist()
)
return exp(- lambda_ * n) * lambda_x / x_factorial
def poisson_log_likelihood(x, lambda_):
n = x.shape[0]
log_lambda_x = log(lambda_) * np.sum(x)
    log_x_factorial = np.sum(special.gammaln(x + 1))  # gammaln(x+1) = log(x!); avoids object-dtype arrays from exact factorials
return (- lambda_ * n) + log_lambda_x - log_x_factorial
# Poisson MLE
lambda_hat = np.mean(data)
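# For Poisson data, d/d(lambda) log L = -n + (sum_i x_i) / lambda = 0 gives the
# closed-form MLE: lambda_hat = sample mean.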
def gaussian_likelihood(x, mu, var):
n = x.shape[0]
normalization_factor = (2. * pi * var) ** (-.5 * n)
x_minus_mu_squared = np.sum((x - mu) ** 2)
return normalization_factor * exp(- x_minus_mu_squared / (2. * var))
def gaussian_log_likelihood(x, mu, var):
n = x.shape[0]
log_normalization_factor = (-.5 * n) * log(2. * pi * var)
x_minus_mu_squared = np.sum((x - mu) ** 2)
return log_normalization_factor - x_minus_mu_squared / (2. * var)
# Gaussian MLE
mu_hat = np.mean(data)
var_hat = np.var(data)
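# np.var defaults to ddof=0 (the 1/n estimator), which is exactly the Gaussian
# MLE of the variance, not the unbiased 1/(n-1) estimator.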
assert isclose(
gaussian_log_likelihood(data, mu_hat, var_hat), -40287.57, abs_tol=1e-2
)
assert isclose(
    exp(gaussian_log_likelihood(data, mu_hat, var_hat)),
    gaussian_likelihood(data, mu_hat, var_hat)
)
print('Poisson')
print('Lambda (MLE): \t\t', lambda_hat)
try:
print('Likelihood: \t\t', poisson_likelihood(data, lambda_hat))
except OverflowError:
print('Likelihood: \t\t', exp(poisson_log_likelihood(data, lambda_hat)))
print('Log-Likelihood: \t', poisson_log_likelihood(data, lambda_hat))
print()
print('Gaussian')
print('Mu (MLE): \t\t', mu_hat)
print('Var (MLE): \t\t', var_hat)
print('Likelihood: \t\t', gaussian_likelihood(data, mu_hat, var_hat))
print('Log-Likelihood: \t', gaussian_log_likelihood(data, mu_hat, var_hat))
# plot gaussian distribution
data_range = np.arange(0, np.max(data), step=1)
gaussian_densities = list(
map(
        # truncated in the source; completed with the MLE parameters fitted above
        lambda x: gaussian_likelihood(np.array([x]), mu_hat, var_hat),
        data_range
    )
)
"""
Plot anomalies for Arctic and Antarctic sea ice extents of the current
year from Sea Ice Index 3 (NSIDC). Total anomaly (global) is also shown.
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : <NAME>
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib.request
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
'N_seaice_extent_daily_v3.0.csv'
### Read Arctic file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
iceAR = dataset[:,3]
missing = dataset[:,4]
### Find the year of interest (hardcoded to 2018 here)
yr2018 = np.where(year == 2018)[0]
iceAR18 = iceAR[yr2018]
### Ice unit Conversion
icevalAR = iceAR18 * 1e6
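# The NSIDC extent column is reported in 10^6 sq km; multiplying by 1e6
# converts the values to sq km.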
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
usecols=[0,1,2])
### Create variables
doy = dataset2[:,0]
meaniceAR = dataset2[:,1] * 1e6
std = dataset2[:,2]
### Anomalies
currentanomAR = icevalAR-meaniceAR[:currentdoy-1]
###########################################################################
###########################################################################
###########################################################################
### Antarctic file
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/south/daily/data/' \
'S_seaice_extent_daily_v3.0.csv'
### Read file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
iceAA = dataset[:,3]
missing = dataset[:,4]
### Find the year of interest (hardcoded to 2018 here)
yr2018 = np.where(year == 2018)[0]
iceAA18 = iceAA[yr2018]
### Ice Conversion
icevalAA = iceAA18 * 1e6
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/south/daily/data/' \
'S_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
usecols=[0,1,2])
### Create variables
doy = dataset2[:,0]
meaniceAA = dataset2[:,1] * 1e6
### Anomalies
currentanomAA = icevalAA-meaniceAA[:currentdoy-1]
###########################################################################
###########################################################################
###########################################################################
### Total Anomaly
totalanom = (currentanomAR + currentanomAA) / 1e6
currentanomAR = currentanomAR/1e6
currentanomAA = currentanomAA/1e6
print('Completed script!')
###########################################################################
###########################################################################
###########################################################################
### Create plot
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
plt.xticks(np.arange(0,366,30.4),xlabels)
import numpy as np
from scipy.optimize import curve_fit
import logging
class stfmrSpectraFitting:
def __init__(self, hArray, vArray, frequency, inputFileName):
self.fieldArray = hArray
self.amplitudeArray = vArray
#self.fieldArray, self.amplitudeArray = self.assignFieldAndAmpArray(hArray, vArray, minRange, maxRange)
self.deltaH = self.findDh(self.fieldArray, self.amplitudeArray) #Units: T
self.Hres = self.fieldArray[int(len(self.fieldArray)/2)] #Units: T
self.frequency = frequency #Units : Hz
self.V0 = (self.amplitudeArray[0] + self.amplitudeArray[-1])/2
self.V1 = 0
self.Vsym = np.amax(self.amplitudeArray) + np.amin(self.amplitudeArray) - 2*self.V0
        self.Vas = np.amax(self.amplitudeArray) - np.amin(self.amplitudeArray)
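        # Initial guesses for the symmetric/antisymmetric Lorentzian fit that is
        # standard for ST-FMR spectra (assumed here): at resonance the symmetric
        # component contributes max + min - 2*V0, while the antisymmetric
        # component sets the peak-to-peak span max - min.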
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import unittest
import numpy as np
from singa import loss
from singa import tensor
class TestLoss(unittest.TestCase):
def setUp(self):
self.x_np = np.asarray([[0.9, 0.2, 0.1],
[0.1, 0.4, 0.5],
[0.2, 0.4, 0.4]],
dtype=np.float32)
self.y_np = np.asarray([[1, 0, 1],
[0, 1, 1],
[1, 0, 0]],
dtype=np.float32)
self.x = tensor.from_numpy(self.x_np)
self.y = tensor.from_numpy(self.y_np)
def test_sigmoid_cross_entropy(self):
sig = loss.SigmoidCrossEntropy()
l1 = sig.forward(True, self.x, self.y)
sig.backward()
l2 = sig.evaluate(True, self.x, self.y)
p = 1.0 / (1 + np.exp(-self.x_np))
l = - (self.y_np * np.log(p) + (1 - self.y_np) * np.log(1 - p))
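        # p is the elementwise sigmoid of x; l is the elementwise binary cross
        # entropy, i.e. the quantity SigmoidCrossEntropy should average over.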
self.assertAlmostEqual(l1.l1(), l2)
        self.assertAlmostEqual(l1.l1(), np.average(l))
"""A collection of core functions."""
import logging
import os
from collections import defaultdict
from typing import Union
import numpy as np
import xarray as xr
from scipy.spatial.distance import pdist, squareform
logger = logging.getLogger(os.path.basename(__file__))
def area_weighted_mean(data_array: 'xr.DataArray') -> 'xr.DataArray':
"""Calculate area mean weighted by the latitude.
Returns a data array consisting of N values, where N == number of
ensemble members.
"""
weights_lat = np.cos(np.radians(data_array.lat))
means = data_array.weighted(weights_lat).mean(dim=['lat', 'lon'])
return means
def distance_matrix(values: 'np.ndarray',
weights: 'np.ndarray' = None) -> 'np.ndarray':
"""Calculate the pairwise distance between model members.
Takes a dataset with ensemble member/lon/lat. Flattens lon/lat
into a single dimension. Calculates the distance between every
ensemble member.
If weights are passed, they should have the same shape as values.
Returns 2D NxN array, where N == number of ensemble members.
"""
n_members = values.shape[0]
values = values.reshape(n_members, -1)
# pdist does not work with NaN
not_nan = np.where(np.all(np.isfinite(values), axis=0))[0]
values = values[:, not_nan]
if weights is not None:
# Reshape weights to match values array
weights = weights.reshape(n_members, -1)
weights = weights[:, not_nan]
weights = weights[0] # Weights are equal along first dim
d_matrix = squareform(pdist(values, metric='euclidean', w=weights))
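    # squareform expands the condensed pdist vector into the symmetric
    # N x N matrix (zero diagonal) promised in the docstring.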
return d_matrix
def calculate_model_distances(
data_array: 'xr.DataArray',
dimension: str = 'model_ensemble_reference') -> 'xr.DataArray':
"""Calculate pair-wise distances between all values in data_array.
Distances are calculated as the area weighted euclidean distance
between each pair of models in data_array. Returned is a square matrix
with where the number of elements along each edge equals the number
of ensemble members.
Parameters
----------
data_array : array_like, shape (N,...)
Array of (2 dimensional) model fields.
dimension : string
Name of the newly created reference dimension (default:
'model_ensemble_reference'. Must not be equal to the existing
model dimension ('model_ensemble')!
Returns
-------
distances : array_like, shape (N, N)
Symmetric matrix of pairwise model distances.
"""
assert dimension != 'model_ensemble', f'{dimension} != "model_ensemble"'
weights = np.cos(np.radians(data_array.lat))
weights, _ = xr.broadcast(weights, data_array)
diff = xr.apply_ufunc(
distance_matrix,
data_array,
weights,
input_core_dims=[['model_ensemble', 'lat', 'lon'],
['model_ensemble', 'lat', 'lon']],
output_core_dims=[[dimension, 'model_ensemble']],
)
diff.name = f'd{data_array.name}'
diff.attrs['variable_group'] = data_array.name
diff.attrs["units"] = data_array.units
diff[dimension] = diff.model_ensemble.values
return diff
def compute_overall_mean(dataset: 'xr.Dataset',
weights: dict) -> 'xr.DataArray':
"""Normalize all variables in a dataset and return their weighted mean.
Relative weights for each variable group are passed via the recipe.
"""
normalized = dataset / dataset.median()
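    # Dividing by the per-variable median makes the groups unitless and
    # comparable before they are averaged with the recipe weights.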
weights_selected = xr.DataArray(
[weights[variable_group] for variable_group in dataset],
coords={'variable_group': list(dataset)},
dims='variable_group')
overall_mean = normalized.to_array(
dim='variable_group').weighted(weights_selected).mean('variable_group')
overall_mean.name = 'overall_mean'
overall_mean.attrs['variable_group'] = 'overall_mean'
overall_mean.attrs['units'] = '1'
return overall_mean
def combine_ensemble_members(
dataset: Union['xr.DataArray', None],
dimensions: Union[str, list] = 'model_ensemble',
) -> (Union['xr.DataArray', None], dict):
"""Combine ensemble members of the same model.
Parameters
----------
dataset : None or data_array, shape (N,) or (N, N)
A vector containing model-observations distances or a matrix containing
model-model distances.
dimensions : string or list of up to two strings
Spezifies the dimensions along which ensemble members are combined.
Returns
-------
dataset : None or data_array, shape (M,), (M, L) with M, L <= N
data_array where ensemble members along the given dimensions are
combined by averaging.
groups : dict of form {string: list}
Dictionary mapping the combined model names (keys) to the original
ensemble member names (values).
"""
if isinstance(dimensions, str):
dimensions = [dimensions]
assert len(
dimensions) <= 2, 'dimensions can contain a maximum of two strings'
if dataset is None:
return None, {}
groups = defaultdict(list)
models = []
for name in dataset['model_ensemble'].values:
model = name.split('_')[0]
groups[model].append(name)
models.append(model)
for dimension in dimensions:
if dimension in dataset.dims:
model = xr.DataArray(models, dims=dimension)
dataset = dataset.groupby(model).mean(keep_attrs=True).rename(
{'group': dimension})
if len(dimensions) == 2:
# need to set the diagonal elements back to zero after averaging
dataset.values[np.diag_indices(dataset['model_ensemble'].size)] = 0
return dataset, groups
def calculate_weights_data(
performance: Union['np.array', None],
independence: Union['np.array', None],
performance_sigma: Union[float, None],
independence_sigma: Union[float, None]) -> 'np.array':
"""Calculate normalized weights for each model N.
Parameters
----------
performance : array_like, shape (N,) or None
Array specifying the model performance. None is mutually exclusive
with independence being None. Single values in performance can be
nan, then they will be excluded from the independence calculation as
well (used for the perfect model test).
independence : array_like, shape (N, N) or None
Array specifying the model independence. None is mutually exclusive
with performance being None.
performance_sigma : float or None
Sigma value defining the form of the weighting function
for the performance. Can be one only if performance is also None.
independence_sigma : float or None
Sigma value defining the form of the weighting function
for the independence. Can be one only if independence is also None.
Returns
-------
weights : ndarray, shape (N,)
"""
numerator = 1
not_nan = True
denominator = 1
if performance is not None:
numerator = np.exp(-((performance / performance_sigma)**2))
# nans in the performance vector indicate models to be excluded
not_nan = np.isfinite(performance)
if independence is not None:
# don't consider nan models for independence of other models!
exp = np.exp(-((independence[:, not_nan] / independence_sigma)**2))
# Note diagonal = exp(0) = 1, thus this is equal to 1 + sum(i!=j)
denominator = exp.sum(axis=1)
weights = numerator / denominator
weights /= weights.sum(where=not_nan)
return weights
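# Minimal usage sketch (hypothetical numbers, not from any recipe): three models
# where the first two are near-duplicates and the third is more independent.
#
#     performance = np.array([0.5, 0.5, 1.0])
#     independence = np.array([[0.0, 0.1, 2.0],
#                              [0.1, 0.0, 2.0],
#                              [2.0, 2.0, 0.0]])
#     weights = calculate_weights_data(performance, independence, 1.0, 0.5)
#     # weights sums to 1; the near-duplicate pair splits the weight a single
#     # model of that quality would otherwise receive.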
def calculate_weights(
performance: Union['xr.DataArray', None],
independence: Union['xr.DataArray', None],
performance_sigma: Union[float, None],
independence_sigma: Union[float, None]) -> 'xr.DataArray':
"""Xarray wrapper for calculate_weights_data."""
performance_core_dims = [] if performance is None else ['model_ensemble']
independence_core_dims = [] if independence is None else [
'model_ensemble', 'model_ensemble_reference'
]
weights = xr.apply_ufunc(
calculate_weights_data,
performance,
independence,
performance_sigma,
independence_sigma,
input_core_dims=[
performance_core_dims, independence_core_dims, [], []
],
output_core_dims=[['model_ensemble']],
vectorize=True,
)
weights.name = 'weight'
weights.attrs['variable_group'] = 'weight' # used in barplot
weights.attrs['units'] = '1'
return weights
def weighted_quantile(values: list,
quantiles: list,
weights: list = None) -> 'np.array':
"""Calculate weighted quantiles.
Analogous to np.quantile, but supports weights.
Based on: https://stackoverflow.com/a/29677616/6012085
Parameters
----------
values: array_like
List of input values.
quantiles: array_like
List of quantiles between 0.0 and 1.0.
weights: array_like
List with same length as `values` containing the weights.
Returns
-------
np.array
Numpy array with computed quantiles.
"""
values = np.array(values)
quantiles = np.array(quantiles)
if weights is None:
weights = np.ones(len(values))
weights = np.array(weights)
# remove nans
    not_nan = np.where(np.isfinite(values) & np.isfinite(weights))[0]
    values = values[not_nan]
    weights = weights[not_nan]
    # Completed following the Stack Overflow answer cited in the docstring:
    # sort, build the weighted CDF, and interpolate the requested quantiles.
    sorter = np.argsort(values)
    values = values[sorter]
    weights = weights[sorter]
    weighted_quantiles = np.cumsum(weights) - 0.5 * weights
    weighted_quantiles /= np.sum(weights)
    return np.interp(quantiles, weighted_quantiles, values)
import random
import numpy as np
import cv2
import matplotlib.pyplot as plt
# from matplotlib import pyplot as plt
import scipy
from scipy import signal
from PIL import Image
from scipy.ndimage import median_filter
# Convolution kernels are usually odd-sized, so the kernel size is assumed odd here
'''
####################
Basic image-processing helper functions
####################
'''
# Add a zero border around the image
def addBoundary(img, kernel):
    '''
    Pad the image with a zero border sized for the kernel
    :param img: input image
    :param kernel: convolution kernel
    :return: image with the added border
    '''
kernel_size = kernel.shape[0]
addLine = (int)((kernel_size - 1) / 2)
img_ = cv2.copyMakeBorder(img, addLine, addLine, addLine, addLine, cv2.BORDER_CONSTANT, value=0);
return img_
def convolve1(img, kernel, filter_type, mode='same'):
    '''
    Convolve a single-channel image with a kernel (mainly for grayscale images)
    :param img: input single-channel image matrix
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter', gauss, or mean, i.e. choose median,
                        Gaussian, or mean filtering (other filters may be added later)
    :param mode: 'same' pads the image so the output keeps the input size
    :return: the convolved image
    '''
    if mode == 'same':
        img_ = addBoundary(img, kernel)
    else:
        img_ = img
kernel_height = kernel.shape[0]
kernel_width = kernel.shape[1]
    # number of convolution steps along the vertical and horizontal directions
conv_height = img_.shape[0] - kernel_height + 1
conv_width = img_.shape[1] - kernel_width + 1
    # the convolution result is stored in conv
conv = np.zeros((conv_height, conv_width), dtype='uint8')
for i in range(conv_height):
for j in range(conv_width):
conv[i][j] = wise_element_sum(img_[i:i + kernel_height, j:j + kernel_width], kernel, filter_type)
return conv
def wise_element_sum(img, kernel, filter_type):
    '''
    Compute the value of a single convolution step
    :param img: input image patch matrix
    :param kernel: convolution kernel
    :param filter_type: 'medium_Filter', gauss, or mean, i.e. choose median,
                        Gaussian, or mean filtering (other filters may be added later)
    :return: the resulting pixel value, clipped to [0, 255]
    '''
if filter_type == 'medium_Filter':
        temp = img * kernel
        values = []  # avoid shadowing the built-in name `list`
        for i in range(temp.shape[0]):
            for j in range(temp.shape[1]):
                values.append(temp[i][j])
        values.sort()
        median = values[int(len(values) / 2)]
        if median > 255:
            return 255
        elif median < 0:
            return 0
        else:
            return median
    # mean, Gaussian, and similar filters
else:
result = (img * kernel).sum()
if result < 0:
return 0
elif result > 255:
return 255
else:
return result
def convolve(img, kernel, filter_type, mode='same'):
'''
三通道卷积,主要用于彩色图
:param img: 输入图像矩阵
:param kernel: 卷积核
:param mode: medium,gauss,mean, 即选择中值滤波、高斯滤波、还是均值滤波,其他滤波方式以后添加
:return: 卷积后的图像矩阵
'''
conv_B = convolve1(img[:, :, 0], kernel, filter_type, mode)
conv_G = convolve1(img[:, :, 1], kernel, filter_type, mode)
conv_R = convolve1(img[:, :, 2], kernel, filter_type, mode)
conv_img = np.dstack([conv_B, conv_G, conv_R])
return conv_img
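
# Usage sketch (illustrative; the file name is a placeholder): apply a
# normalised 3x3 mean kernel to a colour image with the routines above.
def _example_mean_filter():
    img = cv2.imread('input.jpg')          # BGR colour image
    mean_kernel = np.ones((3, 3)) / 9.0    # normalised 3x3 mean kernel
    # any filter_type other than 'medium_Filter' takes the weighted-sum branch
    return convolve(img, mean_kernel, 'mean_Filter')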
'''
############################################
Noise functions
Impulse noise:         add_PulseNoise(img, SNR)
Salt-and-pepper noise: add_Salt_PepperNoise(img, SNR)
Gaussian noise:        add_Gauss_Noise(img, mean, sigma)
#############################################
'''
# Add impulse noise
def add_PulseNoise(img, SNR):
    '''
    Add impulse noise to an image
    :param img: input image
    :param SNR: signal-to-noise ratio, controlling how much noise is added
    :return: the noisy image
    '''
rows, cols, dims = img.shape
    # create a matrix with the same size as the image
    R = np.mat(img[:, :, 0])
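
# The header above also lists add_Salt_PepperNoise and add_Gauss_Noise, which
# are not included in this excerpt; a minimal Gaussian-noise sketch matching
# the listed signature (an illustrative stand-in, not the original code):
def add_Gauss_Noise_sketch(img, mean, sigma):
    noise = np.random.normal(mean, sigma, img.shape)  # per-pixel Gaussian noise
    noisy = img.astype(np.float64) + noise
    return np.clip(noisy, 0, 255).astype(np.uint8)    # clamp to the valid 8-bit range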
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Evaluation config: line 489-497 for mitigated embedding, p822-824 for original embedding
import json, codecs, time, re, os
import gensim
import logging
import numpy as np
import pandas as pd
from gensim.models import word2vec, FastText
from gensim.test.utils import datapath
from sklearn.decomposition import PCA
from sklearn import svm, metrics
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score, confusion_matrix
from collections import OrderedDict, defaultdict
from copy import deepcopy
# for visualizing
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import random
import config, mitigating_stereotypes, base_words
from config import SOURCE_DIR
start_time = time.time()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Basic constants
DEFAULT_ARGUMENTS_W2V = dict(workers=4, sg=1, size=300, window=5, min_count=5, sample=1e-4, negative=5, seed=1, iter=2)
DEFAULT_ARGUMENTS_FT = dict(**DEFAULT_ARGUMENTS_W2V, min_n=3, max_n=6)
SVM_Cs = [10]
### UCI setting ###
SMALL_UCI_NUM = 32561
def load_professions(fname):
with codecs.open(fname, 'r', encoding='utf-8', errors='ignore') as f:
professions = json.load(f)
print('Loaded professions\n' +
'Format:\n' +
'word,\n' +
'definitional female -1.0 -> definitional male 1.0\n' +
'stereotypical female -1.0 -> stereotypical male 1.0')
return professions
sensitive_pair, neutral_word_list = config.load_analogy_pair(SOURCE_DIR + 'minority_groups.txt')
def load_UCI():
X_train, y_train, X_test, y_test = [], [], [], []
with codecs.open(SOURCE_DIR + 'UCI_adult_dataset.txt', 'r', encoding='utf-8', errors='ignore') as f:
for i, line in enumerate(re.split('[\r\n]+', f.read())):
line = re.sub(r' ', '', line)
tokens = re.split(r',', line)
if len(tokens) == 15:
X_train.append(tokens[:-1])
y_train.append(tokens[-1])
with codecs.open(SOURCE_DIR + 'UCI_adult_test.txt', 'r', encoding='utf-8', errors='ignore') as f:
for i, line in enumerate(re.split('[\r\n]+', f.read())):
if i == 0:
continue
line = re.sub(r' ', '', line)
tokens = re.split(r',', line)
if len(tokens) == 15:
X_test.append(tokens[:-1])
y_test.append(tokens[-1])
print("### UCI train set statistics ###")
UCI_stats_by_gender(X_train[:SMALL_UCI_NUM], y_train[:SMALL_UCI_NUM])
print("### UCI test set statistics ###")
UCI_stats_by_gender(X_test, y_test)
return (X_train, y_train), (X_test, y_test)
def word2rep(_X_train, _y_train, model):
X_train, y_train = [], []
avg_fnl_weight = np.array([int(e[2]) for e in _X_train]).mean()
avg_hpw_weight = np.array([int(e[12]) for e in _X_train]).mean()
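    # Raw numeric fields (e.g. fnlwgt) are rescaled below by the ratio of the
    # mean hours-per-week (column 12) to the mean fnlwgt (column 2), which
    # brings the numeric columns onto a magnitude comparable to the embedding
    # components before concatenation.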
for X, y in zip(_X_train, _y_train):
tmp_X = np.array([])
for token in X:
if not re.search(r'[a-zA-Z\-?]+', token):
#tmp_X = np.append(tmp_X, np.array([float(token)/10000]))
tmp_X = np.append(tmp_X, np.array([float(token)*avg_hpw_weight/avg_fnl_weight]))
tmp_X = np.append(tmp_X, np.zeros(np.shape(model.syn0[1])[0] - 1))
elif not config.CONSIDER_GENDER and (token == 'Male' or token == 'Female'):
continue
elif token in model.vocab:
tmp_X = np.append(tmp_X, model[token])
# compound with '-': only select first vocab without oov for regulating sizes of all X
elif re.search(r'-', token):
add_tokens = re.split(r'-', token)
i = 1
for add_token in add_tokens:
if add_token in model.vocab:
tmp_X = np.append(tmp_X, model[add_token])
i = 0
break
else:
continue
if i:
tmp_X = np.append(tmp_X, np.zeros(np.shape(model.syn0[1]), dtype=float))
else:
tmp_X = np.append(tmp_X, np.zeros(np.shape(model.syn0[1]), dtype=float))
if np.shape(tmp_X)[0] > 0:
X_train.append(tmp_X)
if re.search(r'>', y):
y_train.append(1)
else:
y_train.append(0)
return np.array(X_train), np.array(y_train)
def identify_index_by_gender(X, y):
stats_dict = {}
stats_dict['Male'] = []
stats_dict['Female'] = []
for i, (tokens, y) in enumerate(zip(X, y)):
stats_dict[tokens[9]].append(i)
return np.array(stats_dict['Male']), np.array(stats_dict['Female'])
def identify_index_by_race(X, y):
stats_dict = {}
stats_dict['Amer-Indian-Eskimo'] = []
stats_dict['Asian-Pac-Islander'] = []
stats_dict['Black'] = []
stats_dict['White'] = []
stats_dict['Other'] = []
for i, (tokens, y) in enumerate(zip(X, y)):
stats_dict[tokens[8]].append(i)
return np.array(stats_dict['Amer-Indian-Eskimo']), np.array(stats_dict['Asian-Pac-Islander']), \
np.array(stats_dict['Black']), np.array(stats_dict['White']), np.array(stats_dict['Other'])
def UCI_stats_by_gender(X, y):
stats_dict = {}
stats_dict['Male'] = [0, 0]
stats_dict['Female'] = [0, 0]
for tokens, label in zip(X, y):
stats_dict[tokens[9]][1 if re.search(r'>', label) else 0] += 1
print("<=50K Male:Female = {:.3f} / {:.3f} ({} / {})".format(stats_dict['Male'][0] / (stats_dict['Male'][0] + stats_dict['Female'][0]),
stats_dict['Female'][0] / (stats_dict['Male'][0] + stats_dict['Female'][0]),
stats_dict['Male'][0], stats_dict['Female'][0]))
print(" >50K Male:Female = {:.3f} / {:.3f} ({} / {})".format(stats_dict['Male'][1] / (stats_dict['Male'][1] + stats_dict['Female'][1]),
stats_dict['Female'][1] / (stats_dict['Male'][1] + stats_dict['Female'][1]),
stats_dict['Male'][1], stats_dict['Female'][1]))
return 0
def print_result(y_test, pred, test_male_index, test_female_index):
acc, auc, pre, rec = accuracy_score(y_test, pred), roc_auc_score(y_test, pred), \
precision_score(y_test, pred, average=None), recall_score(y_test, pred, average=None)
cnf_matrix = confusion_matrix(y_test, pred)
male_cnf_matrix = confusion_matrix(y_test[test_male_index], pred[test_male_index])
female_cnf_matrix = confusion_matrix(y_test[test_female_index], pred[test_female_index])
print(acc, auc, pre, rec)
print("<=50K Male:Female = {:.3f} / {:.3f} ({} / {})".format(np.sum(male_cnf_matrix, axis=0)[0] / np.sum(cnf_matrix, axis=0)[0],
np.sum(female_cnf_matrix, axis=0)[0] / np.sum(cnf_matrix, axis=0)[0],
np.sum(male_cnf_matrix, axis=0)[0], np.sum(female_cnf_matrix, axis=0)[0]))
print(" >50K Male:Female = {:.3f} / {:.3f} ({} / {})".format(np.sum(male_cnf_matrix, axis=0)[1] / np.sum(cnf_matrix, axis=0)[1],
np.sum(female_cnf_matrix, axis=0)[1] / np.sum(cnf_matrix, axis=0)[1],
np.sum(male_cnf_matrix, axis=0)[1], np.sum(female_cnf_matrix, axis=0)[1]))
fpr, fnr = print_cnf_matrix(cnf_matrix)
male_fpr, male_fnr = print_cnf_matrix(male_cnf_matrix)
female_fpr, female_fnr = print_cnf_matrix(female_cnf_matrix)
print("fpr_bias_ratio: {:.2f}, fnr_bias_ratio: {:.2f}".format(male_fpr / female_fpr, male_fnr / female_fnr))
print('-' * 30)
return fpr, fnr
def print_cnf_matrix(cnf_matrix, normalize=True):
print(cnf_matrix)
fpr, fnr = 0, 0
if normalize:
cnf_matrix = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
print(cnf_matrix)
fpr = cnf_matrix[0, 1]
fnr = cnf_matrix[1, 0]
return fpr, fnr
# means predicted:X, target:y
def find_optimal_cutoff(predicted, target):
fpr, tpr, threshold = metrics.roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold': pd.Series(threshold, index=i)})
    roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]
return list(roc_t['threshold']).pop()
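
# Usage sketch (illustrative): choose a decision threshold on held-out scores
# with the routine above, then binarise test predictions with it.
#   cutoff = find_optimal_cutoff(dev_scores, dev_labels)
#   y_pred = (test_scores >= cutoff).astype(int)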
class W2vModel(object):
def __init__(self, vocab_limit=None):
"""
:param is_selected_gender_vocab: 'True' means selected_gender_vocab is prepared.
:param remove_oov: remove words not in w2v.model vocab.
"""
# embedding models
self.w2v_fname = config.WORD_EMBEDDING_NAME
self.w2v_model = self.load_w2v_model(self.w2v_fname, vocab_limit)
if not vocab_limit:
self.w2v_model.init_sims() # for using wv.syn0norm
def load_w2v_model(self, fname, vocab_limit):
try:
try:
print('Loading W2v Model... in {0:.2f} seconds'.format(time.time() - start_time))
w2v_model = word2vec.Word2Vec.load(fname)
if vocab_limit: # it uses KeyedVector class (Word2vec.wv). Do not point wv.
tmp_w2v = gensim.models.KeyedVectors(vector_size=300)
tmp_w2v.index2word = w2v_model.wv.index2word[:vocab_limit]
tmp_w2v.vocab = {w: w2v_model.wv.vocab[w] for w in tmp_w2v.index2word}
# check if the order of keyedvector is broken
for i, w in enumerate(tmp_w2v.index2word):
if tmp_w2v.vocab[w].index != i:
print(w, tmp_w2v.vocab[w].index, i)
tmp_w2v.syn0 = w2v_model.wv.syn0[:vocab_limit, :]
w2v_model.wv.vocab = {}
w2v_model.wv.index2word = []
w2v_model.wv.syn0 = np.zeros((10, 300))
print(tmp_w2v)
return tmp_w2v
print(w2v_model)
except Exception as e:
w2v_model = word2vec.Word2VecKeyedVectors.load_word2vec_format(fname, binary=False)
if vocab_limit: # it uses KeyedVector class (Word2vec.wv). Do not point wv.
tmp_w2v = gensim.models.KeyedVectors(vector_size=300)
tmp_w2v.index2word = w2v_model.index2word[:vocab_limit]
tmp_w2v.vocab = {w: w2v_model.vocab[w] for w in tmp_w2v.index2word}
# check if the order of keyedvector is broken
for i, w in enumerate(tmp_w2v.index2word):
if tmp_w2v.vocab[w].index != i:
print(w, tmp_w2v.vocab[w].index, i)
tmp_w2v.syn0 = w2v_model.syn0[:vocab_limit, :]
w2v_model.vocab = {}
w2v_model.index2word = []
w2v_model.syn0 = np.zeros((10, 300))
print(tmp_w2v)
print('Success to load W2v Model... in {0:.2f} seconds'.format(time.time() - start_time))
return tmp_w2v
print(w2v_model)
print('Success to load W2v Model... in {0:.2f} seconds'.format(time.time() - start_time))
return w2v_model
except Exception as e:
print('No existed model. Training W2v Model... in {0:.2f} seconds'.format(time.time() - start_time))
texts = ''
if config.MODEL_NAME == 'wiki':
texts = config.WikiCorpus()
elif config.MODEL_NAME == 'reddit':
texts = config.RedditCorpus()
else:
print("please select corpus for training model.")
exit(1)
print('training w2v with {} corpus ... in {:.2f} seconds'.format(config.MODEL_NAME, config.whattime()))
w2v_model = word2vec.Word2Vec(texts, **DEFAULT_ARGUMENTS_W2V)
# init_sims: reduce memory but cannot continue training (because original vectors are removed.)
w2v_model.init_sims(replace=True)
#w2v_model.save(fname) # save model
self.w2v_model.save_word2vec_format(fname, binary=False)
print('Success to load W2v Model... in {0:.2f} seconds'.format(time.time() - start_time))
return w2v_model.wv
def test_intrinsic(self):
try:
self.w2v_model.wv.accuracy(SOURCE_DIR+'questions-words.txt', restrict_vocab=300000)
"""
analogy_score, result_list = self.w2v_model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
print("score: {:.2f}".format(analogy_score))
for result_dict in result_list:
print("{}: True {} / False {}".format(result_dict['section'], result_dict['correct'][:3], result_dict['incorrect'][:3]))
"""
except Exception as e:
self.w2v_model.accuracy(SOURCE_DIR + 'questions-words.txt', restrict_vocab=300000)
try:
similarities = self.w2v_model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'), restrict_vocab=300000)
except Exception as e:
similarities = self.w2v_model.evaluate_word_pairs(datapath('wordsim353.tsv'), restrict_vocab=300000)
def test_UCI(self, uci_dataset, small_train=True):
(_X_train, _y_train), (_X_test, _y_test) = uci_dataset
test_male_index, test_female_index = identify_index_by_gender(_X_test, _y_test)
# test_amer_index, test_asian_index, test_black_index, test_white_index, test_other_index = identify_index_by_race(_X_test, _y_test)
(X_train, y_train), (X_test, y_test) = word2rep(_X_train, _y_train, self.w2v_model), word2rep(_X_test, _y_test,
self.w2v_model)
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
print("num of tests / num of labels: {} {} / {} {} in {:.2f} sec".format(
len(X_train), len(X_test), len(set(y_train)), len(set(y_test)), time.time() - start_time))
for c in SVM_Cs:
clf = svm.SVC(C=c)
if small_train:
clf.fit(X_train[:SMALL_UCI_NUM], y_train[:SMALL_UCI_NUM])
else:
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
if not os.path.exists(SOURCE_DIR + 'pred_UCI'):
os.makedirs(SOURCE_DIR + 'pred_UCI')
with codecs.open(SOURCE_DIR + 'pred_UCI/w2v_' + config.MODEL_NAME + str(c) + '_pred.txt', 'w', encoding='utf-8', errors='ignore') as f:
for tokens, label in zip(_X_test, pred):
f.write('\t'.join(tokens) + '\t' + str(label) + '\n')
print_result(y_test, pred, test_male_index, test_female_index)
return 0
def test_analogy(self):
for w1, w2 in sensitive_pair:
for word in neutral_word_list:
try:
print('{}:{} = {}:{}'.format(
w1, w2, word, self.w2v_model.most_similar(positive=[w2, word], negative=[w1], topn=10)))
except Exception as e:
continue
def save(self, fname):
self.w2v_model.save_word2vec_format(fname, binary=False)
def save_vocab(self):
"""
Setting 4: remove noun particle / foreign words / digit and gender_specific suffix / prefix.
After that, only remain the data between upper and lower cut off based on frequency.
:return:
"""
with codecs.open(SOURCE_DIR + '{}_vocabs.txt'.format(config.MODEL_NAME), "w", encoding='utf-8',
errors='ignore') as write_file:
tmp_vocab = OrderedDict()
for word, vocab_obj in sorted(self.w2v_model.wv.vocab.items(), key=lambda item: -item[1].count):
if re.search(r'^[a-zA-Z][a-zA-Z0-9]{0,}$', word):
tmp_vocab[word] = vocab_obj
write_file.write('{0}\t{1}\n'.format(word, vocab_obj.count))
print("Success to save wiki vocabulary.")
self.w2v_vocab = tmp_vocab
def get_keyedvectors(self):
return self.w2v_model
class FtModel(object):
def __init__(self):
"""
:param is_selected_gender_vocab: 'True' means selected_gender_vocab is prepared.
:param remove_oov: remove words not in w2v.model vocab.
"""
# embedding models
self.ft_fname = config.MODEL_DIR + 'ft_{0}_sg_300_neg5_it2.model'.format(config.MODEL_NAME)
self.ft_model = self.load_ft_model(self.ft_fname)
def load_ft_model(self, fname):
"""
class FastText(sentences=None, sg=0, hs=0, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None,
word_ngrams=1, sample=0.001, seed=1, workers=3, min_alpha=0.0001, negative=5, cbow_mean=1, hashfxn=hash, iter=5,
null_word=0, min_n=3, max_n=6, sorted_vocab=1, bucket=2000000, trim_rule=None, batch_words=MAX_WORDS_IN_BATCH)
min_n : int
Min length of char ngrams to be used for training word representations.
max_n : int
Max length of char ngrams to be used for training word representations.
Set max_n to be lesser than min_n to avoid char ngrams being used.
word_ngrams : int {1,0}
If 1, uses enriches word vectors with subword(ngrams) information. If 0, this is equivalent to word2vec.
bucket : int
Character ngrams are hashed into a fixed number of buckets, in order to limit the memory usage of the model.
This option specifies the number of buckets used by the model.
"""
print('Loading Fasttext Model... in {0:.2f} seconds'.format(time.time() - start_time))
try:
fasttext_model = FastText.load(fname)
print(fasttext_model)
except IOError:
print('No existed model. Training Ft Model... in {0:.2f} seconds'.format(time.time() - start_time))
texts = config.WikiCorpus()
fasttext_model = FastText(texts, **DEFAULT_ARGUMENTS_FT)
fasttext_model.save(fname)
print('Success to load Fasttext Model... in {0:.2f} seconds'.format(time.time() - start_time))
return fasttext_model
def test(self):
self.ft_model.wv.accuracy(SOURCE_DIR + 'questions-words.txt')
similarities = self.ft_model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
# print(similarities)
class MyModel(object):
def __init__(self, threshold=None, space_order=[1, 1]):
"""
:param is_selected_gender_vocab: 'True' means selected_gender_vocab is prepared.
:param remove_oov: remove words not in w2v.model vocab.
"""
# embedding models
self.my_fname = config.MITIGATED_EMBEDDING_NAME
self.my_model = self.load_w2v_model(self.my_fname)
self.init_modulate = np.shape(self.my_model.syn0)[1]
self._modulate_vector_linalg(dim=1, dim2=1)
self.threshold = threshold
self.space_order = space_order
self.modulated_number = 0
def load_w2v_model(self, fname, arranged_savfile=True):
try:
print('Loading My Model... in {0:.2f} seconds'.format(time.time() - start_time))
if not arranged_savfile:
w2v_model = gensim.models.KeyedVectors.load(fname)
wi = {w: i for i, w in enumerate(w2v_model.index2word)}
w2v_model.vocab = {word: config.Vocab(count=count, index=wi[word]) for word, count in w2v_model.vocab.items()}
w2v_model.save_word2vec_format(fname, binary=False)
my_model = word2vec.Word2VecKeyedVectors.load_word2vec_format(fname, binary=False)
#my_model = word2vec.Word2Vec.load(fname + 'w2vf')
print(my_model)
except IOError:
print('No existed model. Training My Model... in {0:.2f} seconds'.format(time.time() - start_time))
print("constructing")
exit()
print('Success to load My Model... in {0:.2f} seconds'.format(time.time() - start_time))
return my_model
def _modulate_vector_linalg(self, dim=1, dim2=1):
self.my_model.syn0[:, :dim + dim2] = self.my_model.syn0[:, :dim + dim2] / self.init_modulate
def modulate_sentiment(self, dim=1, dim2=1, intensity=1):
assert len(self.space_order) < 3, "please set space_order with type 'list' (e.g. [1, 1])."
if self.threshold and self.space_order[1] == 1: # modulate sentiment only for entity words
self.my_model.syn0[:, :dim] = np.multiply(self.my_model.syn0[:, :dim],
np.where(self.my_model.syn0[:, dim:dim + dim2] >= (self.threshold / self.init_modulate),
intensity, 1))
elif self.threshold and self.space_order[1] == -1: # modulate sentiment only for entity words
self.my_model.syn0[:, :dim] = np.multiply(self.my_model.syn0[:, :dim],
np.where(self.my_model.syn0[:, dim:dim + dim2] <= -(self.threshold / self.init_modulate),
intensity, 1))
else: # modulate sentiment for entire words
self.my_model.syn0[:, :dim] = self.my_model.syn0[:, :dim] * intensity
self.my_model.syn0norm = (self.my_model.syn0 / np.sqrt((self.my_model.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
self.modulated_number += intensity*1
# self.my_model.init_sims(replace=True)
# it makes syn0 and vectors to be also normalized (same as syn0norm and vectors_norm)
def modulate_all(self, dim=1, dim2=1, intensity=1):
if intensity < 1:
assert len(self.space_order) < 3, "please set space_order with type 'list' (e.g. [1, 1])."
self.my_model.syn0[:, :dim+dim2] = self.my_model.syn0[:, :dim+dim2] * intensity
# self.my_model.init_sims(replace=True)
# it makes syn0 and vectors to be also normalized (same as syn0norm and vectors_norm)
self.my_model.syn0norm = (
self.my_model.syn0 / np.sqrt((self.my_model.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
def test(self, uci_dataset, intensity_order=1):
for i, intensity in enumerate([1, 10]): #, 10, 10]):
if i == 0 and intensity_order < 0:
continue
print("Model with intensity 10^{}, threshold {}".format(i*intensity_order, self.threshold))
self.modulate_sentiment(intensity=intensity**intensity_order)
#self.test_analogy()
self.test_UCI(uci_dataset)
self.test_intrinsic()
#self.show_vocab_tsnescatterplot()
#self.show_topn_embedding()
print("Model with intensity 0, threshold {}".format(self.threshold))
self.modulate_sentiment(intensity=0)
#self.test_analogy()
self.test_UCI(uci_dataset)
self.test_intrinsic()
def test_intrinsic(self):
self.my_model.accuracy(SOURCE_DIR + 'questions-words.txt', restrict_vocab=300000)
similarities = self.my_model.evaluate_word_pairs(datapath('wordsim353.tsv'), restrict_vocab=300000)
print(similarities)
def test_analogy(self):
for w1, w2 in sensitive_pair:
for word in neutral_word_list:
try:
print('{}:{} = {}:{}'.format(
w1, w2, word, self.my_model.most_similar(positive=[w2, word], negative=[w1], topn=10)))
except Exception as e:
continue
def test_UCI(self, uci_dataset, small_train=True):
(_X_train, _y_train), (_X_test, _y_test) = uci_dataset
test_male_index, test_female_index = identify_index_by_gender(_X_test, _y_test)
(X_train, y_train), (X_test, y_test) = word2rep(_X_train, _y_train, self.my_model), word2rep(_X_test, _y_test,
self.my_model)
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
print("num of tests / num of labels: {} {} / {} {} in {:.2f} sec".format(
len(X_train), len(X_test), len(set(y_train)), len(set(y_test)), time.time() - start_time))
for c in SVM_Cs:
clf = svm.SVC(C=c)
if small_train:
clf.fit(X_train[:SMALL_UCI_NUM], y_train[:SMALL_UCI_NUM])
else:
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
            if not os.path.exists(SOURCE_DIR + 'pred_UCI'):
                os.makedirs(SOURCE_DIR + 'pred_UCI')
            with codecs.open(SOURCE_DIR + 'pred_UCI/my' + str(self.modulated_number) + '_' + config.MODEL_NAME + str(c) + '_pred.txt', 'w', encoding='utf-8', errors='ignore') as f:
for tokens, label in zip(_X_test, pred):
f.write('\t'.join(tokens) + '\t' + str(label) + '\n')
print_result(y_test, pred, test_male_index, test_female_index)
return 0
def show_topn_affect(self, dim=1, dim2=1, topn=50):
sort_index_sum = np.ndarray.flatten(self.my_model.vectors[:, :dim]).argsort()
sort_index = np.prod(self.my_model.vectors[:, :dim+dim2], axis=1).argsort()
cond = np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim+dim2]) >= (
self.threshold / self.init_modulate) if self.space_order[1] == 1 else \
np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim+dim2]) <= -(
self.threshold / self.init_modulate)
print("< top {} positive stereotypes >".format(topn))
if self.space_order[0] * self.space_order[1] == 1:
for index in sort_index[cond][:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
for index in sort_index[cond][:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
if self.space_order[0] * self.space_order[1] == 1:
for index in sort_index[cond][:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
for index in sort_index[cond][:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
def show_vocab_tsnescatterplot(self, dim=1, dim2=1, shown_word=60, top=False):
sort_index = np.prod(self.my_model.vectors[:, :dim + dim2], axis=1).argsort()
cond = np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim + dim2]) >= (
self.threshold / self.init_modulate) if self.space_order[1] == 1 else \
np.ndarray.flatten(self.my_model.vectors[sort_index, dim:dim + dim2]) <= -(
self.threshold / self.init_modulate)
# get random words
# close_words = model.similar_by_word(word)
if top:
entity_words = list(sort_index[cond][::self.space_order[1]])[:int(shown_word / 2)]
notity_words = list(sort_index[np.logical_not(cond)][::-self.space_order[1]])[:int(shown_word / 2)]
else:
entity_words = random.sample(list(sort_index[cond]), int(shown_word / 2))
notity_words = random.sample(list(sort_index[np.logical_not(cond)]), int(shown_word / 2))
# add the vector for each of the closest words to the array
arr, word_labels = np.empty((0, 300), dtype='f'), []
for index in entity_words + notity_words:
wrd_vector = self.my_model.syn0norm[index]
word_labels.append(self.my_model.index2word[index])
arr = np.append(arr, np.array([wrd_vector]), axis=0)
# find tsne coords for 1 dimensions
tsne = TSNE(n_components=1, random_state=0)
np.set_printoptions(suppress=True)
x_coords = arr[:, 1]
y_coords = arr[:, 0]
# display scatter plot
plt.scatter(x_coords, y_coords)
for label, x, y in zip(word_labels, x_coords, y_coords):
plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
plt.xlim(x_coords.min() + 0.05, x_coords.max() + 0.05)
plt.ylim(y_coords.min() + 0.05, y_coords.max() + 0.05)
plt.show()
def show_topn_embedding(self, dim=1, dim2=1, topn=30):
sort_index_sent = np.sum(self.my_model.vectors[:, :dim], axis=1).argsort()
if self.space_order[0] == -1:
print("< top {} positive stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
print("< top {} positive stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} negative stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
sort_index_sent = np.sum(self.my_model.vectors[:, dim:dim+dim2], axis=1).argsort()
if self.space_order[1] == -1:
print("< top {} entity stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
print("< top {} notity stereotypes >".format(topn))
for index in sort_index_sent[:-1-topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim+dim2])
else:
print("< top {} entity stereotypes >".format(topn))
for index in sort_index_sent[:-1 - topn:-1]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim + dim2])
print("< top {} notity stereotypes >".format(topn))
for index in sort_index_sent[:topn]:
print(self.my_model.index2word[index], self.my_model.vectors[index][:dim + dim2])
class DebiasModel(object):
def __init__(self, bias_model, same_env=True):
"""
:param is_selected_gender_vocab: 'True' means selected_gender_vocab is prepared.
:param remove_oov: remove words not in w2v.model vocab.
"""
# embedding models
print("same_env: {}".format(same_env))
if same_env:
self.model = self.debias_we_same_env(bias_model)
else:
self.model = self.debias_we(bias_model)
def debias_we(self, E):
print('Loading Debias Model... in {0:.2f} seconds'.format(time.time() - start_time))
with open(SOURCE_DIR + 'definitional_pairs.json', "r") as f:
definitional = json.load(f)
with open(SOURCE_DIR + 'equalize_pairs.json', "r") as f:
equalize = json.load(f)
with open(SOURCE_DIR + 'gender_specific_seed.json', "r") as f:
gender_specific_words = json.load(f)
tmp_w2v = gensim.models.KeyedVectors(vector_size=300)
tmp_w2v.index2word = E.index2word
tmp_w2v.vocab = E.vocab
tmp_w2v.syn0 = E.syn0
tmp_w2v.syn0norm = (E.syn0 / np.sqrt((E.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
gender_direction = self.doPCA(definitional, tmp_w2v).components_[0]
specific_set = set(gender_specific_words)
for i, w in enumerate(tmp_w2v.index2word):
if w not in specific_set:
tmp_w2v.syn0[i] = self.drop(tmp_w2v.syn0[i], gender_direction)
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
candidates = {x for e1, e2 in equalize for x in [(e1.lower(), e2.lower()),
(e1.title(), e2.title()),
(e1.upper(), e2.upper())]}
print(candidates)
for (a, b) in candidates:
if (a in tmp_w2v.index2word and b in tmp_w2v.index2word):
y = self.drop((tmp_w2v[a] + tmp_w2v[b]) / 2, gender_direction)
z = np.sqrt(1 - np.linalg.norm(y) ** 2)
if (tmp_w2v[a] - tmp_w2v[b]).dot(gender_direction) < 0:
z = -z
tmp_w2v.syn0[tmp_w2v.vocab[a].index] = z * gender_direction + y
tmp_w2v.syn0[tmp_w2v.vocab[b].index] = -z * gender_direction + y
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
print('Success to load Debias Model... in {0:.2f} seconds'.format(time.time() - start_time))
return tmp_w2v
def debias_we_same_env(self, E, random_sent_pair=False):
print('Loading Debias (same env.) Model... in {0:.2f} seconds'.format(time.time() - start_time))
print('example: {} \n {}'.format(np.array(E['Male']), np.array(E['Female'])))
lexicon = config.load_sent_lexicon()
lexicon2, lexicon2_vocab = config.load_entity_lexicon()
num = int(config.BASE_WORD_NUM)
if random_sent_pair:
positive_seeds, negative_seeds = mitigating_stereotypes.generate_random_seeds(lexicon, num=num)
else:
positive_seeds, negative_seeds = base_words.sent_seeds(10)
print(positive_seeds, negative_seeds)
entity_seeds, notity_seeds = mitigating_stereotypes.generate_random_seeds(lexicon2, num=num)
definitional = zip(positive_seeds, negative_seeds)
random_pos_seeds, random_neg_seeds = mitigating_stereotypes.generate_random_seeds(lexicon, num=num)
equalize = zip(random_pos_seeds, random_neg_seeds)
#notity_specific_words = notity_seeds
notity_specific_words = [item[0] for item in lexicon2.items() if item[1] == -1]
tmp_w2v = gensim.models.KeyedVectors(vector_size=300)
tmp_w2v.index2word = E.index2word
tmp_w2v.vocab = E.vocab
tmp_w2v.syn0 = E.syn0
tmp_w2v.syn0norm = (E.syn0 / np.sqrt((E.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
gender_direction = self.doPCA(definitional, tmp_w2v).components_[0]
specific_set = set(notity_specific_words)
for i, w in enumerate(tmp_w2v.index2word):
if w not in specific_set:
tmp_w2v.syn0[i] = self.drop(tmp_w2v.syn0[i], gender_direction)
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
candidates = {x for e1, e2 in equalize for x in [(e1.lower(), e2.lower()),
(e1.title(), e2.title()),
(e1.upper(), e2.upper())]}
print(candidates)
for (a, b) in candidates:
if (a in tmp_w2v.index2word and b in tmp_w2v.index2word):
y = self.drop((tmp_w2v[a] + tmp_w2v[b]) / 2, gender_direction)
z = np.sqrt(1 - np.linalg.norm(y) ** 2)
if (tmp_w2v[a] - tmp_w2v[b]).dot(gender_direction) < 0:
z = -z
tmp_w2v.syn0[tmp_w2v.vocab[a].index] = z * gender_direction + y
tmp_w2v.syn0[tmp_w2v.vocab[b].index] = -z * gender_direction + y
tmp_w2v.syn0norm = (tmp_w2v.syn0 / np.sqrt((tmp_w2v.syn0 ** 2).sum(-1))[..., np.newaxis]).astype(float)
print('Success to load Debias (same env.) Model... in {0:.2f} seconds'.format(time.time() - start_time))
        print('example: {} \n {}'.format(np.array(E['Male']), np.array(E['Female'])))
        return tmp_w2v
import numpy as np
import pandas as pd
import seaborn as sns
import pickle
import warnings
from tqdm import tqdm
import os
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
pd.set_option('display.max_columns', 500)
warnings.filterwarnings('ignore')
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def cal_accuracy(y_pred, y_true):
y_pred = np.where(y_pred >= 0.5, 1, 0)
return (y_pred == y_true).mean()
def l2_logistic_regression(x, y, lr, lbd, dev_x, dev_y):
w = np.zeros(x.shape[1])
epsilon = 1e-8
min_loss = float('inf')
best_train_acc = 0
best_dev_acc = 0
train_acc_history = []
dev_acc_history = []
for i in tqdm(range(10000)):
gradient = ((x.multiply(y - sigmoid(x.dot(w)), axis=0)).mean())
w = w + lr * gradient
w[1:] = w[1:] - lr * lbd * w[1:]
train_acc = cal_accuracy(sigmoid(x.dot(w)), y)
dev_acc = cal_accuracy(sigmoid(dev_x.dot(w)), dev_y)
train_acc_history.append(train_acc)
dev_acc_history.append(dev_acc)
best_train_acc = max(best_train_acc, train_acc)
best_dev_acc = max(best_dev_acc, dev_acc)
loss = ((-1 * y * np.log(sigmoid(x.dot(w)))) - ((np.ones(x.shape[0]) - y) * np.log(
np.ones(x.shape[0]) - sigmoid(x.dot(w))))).mean() + lbd * np.sum(np.power(w[1:], 2))
# print("iter={}, loss={}, train_acc={}, dev_acc={}".format(i + 1, loss, train_acc, dev_acc))
min_loss = min(min_loss, loss)
if np.linalg.norm(gradient) <= epsilon:
# print("lr={}, min_loss={}".format(lr, min_loss))
break
loss = ((-1 * y * np.log(sigmoid(x.dot(w)))) - ((np.ones(x.shape[0]) - y) * np.log(
np.ones(x.shape[0]) - sigmoid(x.dot(w))))).mean() + lbd * np.sum(np.power(w[1:], 2))
train_acc = cal_accuracy(sigmoid(x.dot(w)), y)
dev_acc = cal_accuracy(sigmoid(dev_x.dot(w)), dev_y)
print("lr={}, lambda={}, min_loss={}, best_train_acc={}, best_dev_acc={}, loss={}, train_acc={}, dev_acc={}".format(
lr, lbd, min_loss,
best_train_acc, best_dev_acc,
loss, train_acc, dev_acc))
return w, train_acc_history, dev_acc_history
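
# Usage sketch (illustrative; the data preparation is an assumption): x and
# dev_x are pandas DataFrames whose first column is an all-ones bias term,
# since w[0] is excluded from the regularisation in both routines here.
#   w, train_hist, dev_hist = l2_logistic_regression(
#       x, y, lr=0.1, lbd=1e-4, dev_x=dev_x, dev_y=dev_y)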
def l1_logistic_regression(x, y, lr, lbd, dev_x, dev_y):
w = np.zeros(x.shape[1])
epsilon = 1e-8
min_loss = float('inf')
best_train_acc = 0
best_dev_acc = 0
train_acc_history = []
dev_acc_history = []
for i in tqdm(range(10000)):
gradient = ((x.multiply(y - sigmoid(x.dot(w)), axis=0)).mean())
w = w + lr * gradient
w[1:] = np.sign(w[1:]) * np.maximum(np.abs(w[1:]) - (lr * lbd), np.zeros(w[1:].shape))
train_acc = cal_accuracy(sigmoid(x.dot(w)), y)
dev_acc = cal_accuracy(sigmoid(dev_x.dot(w)), dev_y)
train_acc_history.append(train_acc)
dev_acc_history.append(dev_acc)
best_train_acc = max(best_train_acc, train_acc)
best_dev_acc = max(best_dev_acc, dev_acc)
loss = ((-1 * y * np.log(sigmoid(x.dot(w)))) - ((np.ones(x.shape[0]) - y) * np.log(
np.ones(x.shape[0]) - sigmoid(x.dot(w))))).mean() + lbd * np.sum(np.abs(w[1:]))
# print("iter={}, loss={}, train_acc={}, dev_acc={}".format(i + 1, loss, train_acc, dev_acc))
min_loss = min(min_loss, loss)
if np.linalg.norm(gradient) <= epsilon:
# print("lr={}, min_loss={}".format(lr, min_loss))
break
    loss = ((-1 * y * np.log(sigmoid(x.dot(w)))) - ((np.ones(x.shape[0]) - y) * np.log(
        np.ones(x.shape[0]) - sigmoid(x.dot(w))))).mean() + lbd * np.sum(np.abs(w[1:]))
    train_acc = cal_accuracy(sigmoid(x.dot(w)), y)
    dev_acc = cal_accuracy(sigmoid(dev_x.dot(w)), dev_y)
    print("lr={}, lambda={}, min_loss={}, best_train_acc={}, best_dev_acc={}, loss={}, train_acc={}, dev_acc={}".format(
        lr, lbd, min_loss,
        best_train_acc, best_dev_acc,
        loss, train_acc, dev_acc))
    return w, train_acc_history, dev_acc_history
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from common.utils import compute_std_of_mean
SAVE_ROOT = '../../figs_sigcomm22'
plt.style.use('seaborn-deep')
plt.rcParams['font.family'] = 'Arial'
# plt.rcParams['font.size'] = 42
# plt.rcParams['axes.labelsize'] = 42
# plt.rcParams['legend.fontsize'] = 42
# plt.rcParams['figure.figsize'] = (11, 9)
plt.rcParams['svg.fonttype'] = 'none'
HATCHES = ['/', '\\', 'x', 'o', '.', 'O', '-', '*', '+']
WIDTH = 0.3
bbr_reward, bbr_tput, bbr_tail_lat, bbr_loss = 192.81, 32.94, 368.93, 0.03
copa_reward, copa_tput, copa_tail_lat, copa_loss = 183.89, 25.70, 265.02, 0.01
cubic_reward, cubic_tput, cubic_tail_lat, cubic_loss = -19.16, 33.99, 802.69, 0.02
vivace_reward, vivace_tput, vivace_tail_lat, vivace_loss = -547.01, 21.71, 947.25, 0.13
vivace_latency_reward, vivace_latency_tput, vivace_latency_tail_lat, vivace_latency_loss = -548.64, 21.84, 1010.43, 0.13
vivace_loss_reward, vivace_loss_tput, vivace_loss_tail_lat, vivace_loss_loss = -825.15, 28.89, 1125.94, 0.26
genet_reward = 223.88
genet_reward_err = 8.05
genet_tput, genet_tail_lat, genet_loss = 31.77, 183.75, 0.02
udr1_reward = 136.81
udr1_reward_err = 23.61
udr1_tput, udr1_tail_lat, udr1_loss = 23.16, 204.23, 0.03
udr2_reward = 158.48
udr2_reward_err = 17.71
udr2_tput, udr2_tail_lat, udr2_loss = 23.09, 185.58, 0.02
udr3_reward = 159.34
udr3_reward_err = 22.83
udr3_tput, udr3_tail_lat, udr3_loss = 22.72, 179.06, 0.02
real_reward = 191.61
real_reward_err = 3.88 # 26.39 250.47 0.02
cl1_reward = 143.86
cl1_reward_err = 7.64 # 22.53 206.07 0.02
cl2_reward = 177.97
cl2_reward_err = 4.55 # 23.17 204.86 0.01
udr3_real_5percent_ethernet_rewards = [177.2, 209.8, 95.2]
udr3_real_10percent_ethernet_rewards = [139, 175, 173]
udr3_real_20percent_ethernet_rewards = [133, 125, 151]
udr3_real_50percent_ethernet_rewards = [162, 124, 78]
column_wid = 0.7
capsize_wid = 8
eline_wid = 2
def generalization_test_ethernet():
plt.rcParams['font.size'] = 36
plt.rcParams['axes.labelsize'] = 36
plt.rcParams['axes.titlesize'] = 36
plt.rcParams['legend.fontsize'] = 36
fig, ax = plt.subplots(figsize=(9, 5))
# plt.bar([1, 2], [bbr_reward, cubic_reward], hatch=HATCHES[:2])
bars = ax.bar([1, 2, 3, 4],
[udr1_reward, udr2_reward, udr3_reward, real_reward],
yerr=[udr1_reward_err, udr2_reward_err, udr3_reward_err,
real_reward_err], color='C0', width=column_wid,
error_kw=dict( lw=eline_wid, capsize=capsize_wid))
# bars = ax.bar([1, 2, 3, 4],
# [udr1_reward, udr2_reward, udr3_reward, real_reward],
# color=None, edgecolor='white')
for bar, pat in zip(bars, HATCHES):
bar.set_hatch(pat)
ax.bar([5], [genet_reward], yerr=[genet_reward_err], capsize=8, width=column_wid,
color='C2', error_kw=dict( lw=eline_wid, capsize=capsize_wid))
# plt.title('Ethernet')
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels(['RL1', 'RL2', 'RL3', 'RL-real', 'Genet'], rotation=20)
ax.set_ylabel('Test reward')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False) # ticks along the top edge are off
# plt.tight_layout()
svg_file = os.path.join(SAVE_ROOT, 'evaluation_generalization_test_ethernet.svg')
pdf_file = os.path.join(SAVE_ROOT, 'evaluation_generalization_test_ethernet.pdf')
fig.savefig(svg_file, bbox_inches='tight')
os.system("inkscape {} --export-pdf={}".format(svg_file, pdf_file))
os.system("pdfcrop --margins 1 {} {}".format(pdf_file, pdf_file))
def asymptotic_real():
plt.rcParams['font.size'] = 34
plt.rcParams['axes.labelsize'] = 34
plt.rcParams['axes.titlesize'] = 34
plt.rcParams['legend.fontsize'] = 34
fig, ax = plt.subplots(figsize=(9.5, 5))
bbr_reward = 192.81 #32.94 368.93 0.03
udr_real_synthetic_reward = 171.16 # 23.67 194.00 0.02
udr_real_synthetic_reward_err = 24.22
genet_real_synthetic_reward = 239.39 # 30.93 208.04 0.02
genet_real_synthetic_reward_err = 7.34
cubic_reward = 97.16 # 33.99 802.69 0.02
# plt.bar([1, 2], [bbr_reward, cubic_reward])
# plt.bar([3.5, 4.5,],
ax.bar([1, 2.2, 3.4, 4.6, 5.8],
[udr_real_synthetic_reward, # 1%
np.mean(udr3_real_10percent_ethernet_rewards),
            np.mean(udr3_real_20percent_ethernet_rewards)
# -*- coding: utf-8 -*
"""
---------------------
SOFTWARE DESCRIPTION:
---------------------
Written October 2018 -- <NAME>
Typeset in Python 3
This python class is specifically made for the spectroscopic data reduction of the Shelyak eShel spectrograph
which is installed at the Hertzsprung SONG node telescope at Tenrife, Spain. The software is originally built
from structures of the 'SONGWriter' which is SONG's spectroscopic data reduction pipeline, and by others is
inspired by the data reduction pipeline 'FIESTools' of the FIES spectrograph at the NOT on La Palma.
"""
# Numpy:
import numpy as np
# Astropy:
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
# PyAstronomy:
import PyAstronomy as pyas
# SciPy:
import scipy
import scipy.constants
import scipy.io
import scipy.ndimage
from scipy.ndimage import median_filter
# Matplotlib:
import matplotlib.pyplot as plt
from matplotlib import gridspec
from tikzplotlib import save as tikz_save
# Others:
import math, sys, time, glob, pylab, heapq
import bottleneck
from skimage import feature as skfeature
# Error of propagation (nominal_value, std_dev):
import uncertainties.unumpy as up
from uncertainties import ufloat
def val(x): return up.nominal_values(x)
def err(x): return up.std_devs(x)
# Own functions:
import Plot_Tools as pt
# Global settings for out-print to terminal (allow more digits and nice column ordering):
np.set_printoptions(suppress=True, formatter={'float_kind':'{:7.5f}'.format}, linewidth=100)
############################################################################################################
# DEFINE CLASS #
############################################################################################################
class BlueSONG(object):
# INITILIZE THE CLASS:
def __init__(self, path, img_name):
#-------------------------------
# DEFINE GLOBAL VARIABLES (DGV):
#-------------------------------
# USER DEFINED VARIABLES:
self.img_name = img_name # Name of image files
self.path = path # Directory path to data
self.path_img = '/home/nicholas/Dropbox/thesis/latex/pictures/'
self.path_blues = '/home/nicholas/Dropbox/Software/Python/blues/'
self.cross_cut = [50, 500] # Cut of spectral region in cross dispersion
self.orders = [1, 2]
self.n_orders = len(self.orders)
# File handling:
self.img_files = np.sort(glob.glob('{}{}*'.format(self.path, self.img_name)))
self.hdul = np.array([fits.open(str(files)) for files in self.img_files])
# Extract headers and sepearte files:
self.img_type = [self.hdul[i][0].header['IMAGETYP'] for i in range(len(self.img_files))]
self.BF_dex = np.where(np.array(self.img_type)=='bias')[0]
self.DF_dex = np.where(np.array(self.img_type)=='dark')[0]
self.FF_dex = np.where(np.array(self.img_type)=='flat')[0]
self.TA_dex = np.where(np.array(self.img_type)=='thar')[0]
self.SF_dex = np.where(np.array(self.img_type)=='star')[0]
# Open header of object:
header = self.hdul[self.SF_dex[0]][0].header
# Observation information:
self.datetime = header['DATE-OBS'] # Date and time of observation (string)
self.date = self.datetime[:10] # Date of observation (string)
self.jdate = header['JD-DATE'] # Julian date (float)
self.altitude = header['OBJ-ALT'] # [deg] Initial altitude of target during obs (float)
self.seeing = header['SEEING2'] # [arcsec] Running mean seeing on slit guiders (float)
# Target information:
self.target = header['OBJECT'] # Name of target (string)
self.ra = header['OBJ-RA'] # Object Right Accension (string)
self.dec = header['OBJ-DEC'] # Object Declination (string)
self.magnitude = header['OBJ-MAG'] # Magnitude of object (float)
# Dimension constants and arrays:
self.len_disp = header['NAXIS1'] # [pixel] Height of image (int)
self.len_cross = header['NAXIS2'] # [pixel] Width of image (int)
self.cen_disp = int(self.len_disp/2) # [pixel] Center position of disp (int)
self.cen_cross = int(self.len_cross/2) # [pixel] Center position of cross (int)
self.disp = np.arange(self.len_disp) # [pixel] Integers spanning disp (array)
# eShel and CCD setup constants:
self.res_power = 10000 # Resolving power
self.gain = 0.27 # [e-/ADU] Gain at -10 degC
self.ron = 20 # [e-] Read-out-noise
self.pixel_size= header['PIXSIZE1'] # [micro m]
# HK survey constants:
self.V = 3901.000 # [Å] V quasi-continuum center
self.K = 3933.664 # [Å] Ca II K line
self.H = 3968.470 # [Å] Ca II H line
self.R = 4001.000 # [Å] R quasi-continuum center
self.bands = [self.V, self.K, self.H, self.R]
self.VR_bandpass = 20.0 # [Å] Bandpass of V and R continuums
self.HK_bandpass = 1.09 # [Å] Bandpass of K and H lines
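
    # A minimal sketch of a Mount Wilson-style S-index built from the HK band
    # constants above; the calibration factor 'alpha' and the rectangular
    # (rather than triangular) bandpasses are illustrative assumptions and
    # not part of the original pipeline.
    def s_index_sketch(self, wavelength, flux, alpha=2.4):
        def band_mean(center, width):
            band = (wavelength > center - width/2) & (wavelength < center + width/2)
            return np.mean(flux[band])
        H = band_mean(self.H, self.HK_bandpass)
        K = band_mean(self.K, self.HK_bandpass)
        V = band_mean(self.V, self.VR_bandpass)
        R = band_mean(self.R, self.VR_bandpass)
        return alpha * (H + K) / (V + R)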
######################################################################################################
# CALIBRATION #
######################################################################################################
def image_reduction(self, redo=0, plot=0):
"""
        This routine takes the data path and loads all image files in the directory.
        It combines the bias, dark, flat, and ThAr frames and makes master frames used
        for the image reduction of the science frames. The routine checks whether master
        calibration frames already exist: (1) if they do, it terminates; (2) if not, it
        continues the image reduction. All calibration frames are saved with an extension
        of the date, and science frames with the extension of the date and time of observation.
----------------------------
INPUT :
----------------------------
path (string): Path to data
plot (integer): Plot flag activated by 1
----------------------------
OUTPUT :
----------------------------
BF_XX-XX-XX (fits): Master bias
DF_XX-XX-XX (fits): Master dark
FF_XX-XX-XX (fits): Master flat
TA_XX-XX-XX (fits): Master flat
SF_XX-XX-XXTXX:XX:XX (fits): Science frame(s): Bias and dark frame calibrated light frames.
"""
#------------------------------------------
# TEST IF CALIBRATION IMAGES ALREADY EXIST:
#------------------------------------------
try:
BF = fits.open('{}BF_{}.fits'.format(self.path, self.date))[0].data
DF = fits.open('{}DF_{}.fits'.format(self.path, self.date))[0].data
FF = fits.open('{}FF_{}.fits'.format(self.path, self.date))[0].data
TA = fits.open('{}TA_{}.fits'.format(self.path, self.date))[0].data
SF = fits.open('{}SF_{}.fits'.format(self.path, self.datetime))[0].data
except IOError:
BF = []
#-------------------------
# ELSE USE AVAILABLE DATA:
#-------------------------
        if len(BF) == 0 or redo == 1:
# Find all calibration images:
BF_i = np.array([fits.getdata(str(self.img_files[i])) for i in self.BF_dex])
DF_i = np.array([fits.getdata(str(self.img_files[i])) for i in self.DF_dex])
FF_i = np.array([fits.getdata(str(self.img_files[i])) for i in self.FF_dex])
TA_i = np.array([fits.getdata(str(self.img_files[i])) for i in self.TA_dex])
SF_i = np.array([fits.getdata(str(self.img_files[i])) for i in self.SF_dex])
# Exposure times:
DF_exptimes = [self.hdul[self.DF_dex[i]][0].header['EXPTIME'] for i in range(len(self.DF_dex))]
FF_exptimes = [self.hdul[self.FF_dex[i]][0].header['EXPTIME'] for i in range(len(self.FF_dex))]
TA_exptimes = [self.hdul[self.TA_dex[i]][0].header['EXPTIME'] for i in range(len(self.TA_dex))]
SF_exptimes = [self.hdul[self.SF_dex[i]][0].header['EXPTIME'] for i in range(len(self.SF_dex))]
# Test if exposure times are the same:
            if int(np.sum(np.diff(DF_exptimes))) != 0:
                print('ERROR: Dark exposure times are different!'); sys.exit()
            if int(np.sum(np.diff(FF_exptimes))) != 0:
                print('ERROR: Flat exposure times are different!'); sys.exit()
            if int(np.sum(np.diff(TA_exptimes))) != 0:
                print('ERROR: ThAr exposure times are different!'); sys.exit()
#---------------------
# PERFORM CALIBRATION:
#---------------------
# Make master bias:
BF = np.median(BF_i, axis=0)
# Make scaled master dark:
DF_current = np.median(DF_i - BF, axis=0)
DF_FF = (FF_exptimes[0]/DF_exptimes[0]) * DF_current
DF_TA = (TA_exptimes[0]/DF_exptimes[0]) * DF_current
DF = (SF_exptimes[0]/DF_exptimes[0]) * DF_current
# Make master flat:
FF = np.median(FF_i - BF - DF_FF, axis=0)
# Make master ThAr:
TA = np.median(TA_i - BF - DF_TA, axis=0)
# Calibrate science frames:
SF = (SF_i - BF - DF)#/(FF/np.max(FF))
#--------------------
# SAVE MASTER FRAMES:
#--------------------
# Find hdulists:
BF_hdul = self.hdul[self.BF_dex[0]][0].header
DF_hdul = self.hdul[self.DF_dex[0]][0].header
FF_hdul = self.hdul[self.FF_dex[0]][0].header
TA_hdul = self.hdul[self.TA_dex[0]][0].header
# Save master calibration images:
fits.writeto('{}BF_{}.fits'.format(self.path, self.date), BF, BF_hdul, overwrite=True)
fits.writeto('{}DF_{}.fits'.format(self.path, self.date), DF, DF_hdul, overwrite=True)
fits.writeto('{}FF_{}.fits'.format(self.path, self.date), FF, FF_hdul, overwrite=True)
fits.writeto('{}TA_{}.fits'.format(self.path, self.date), TA, TA_hdul, overwrite=True)
# Save calibrated science frames one by one:
for i in range(len(self.SF_dex)):
SF_hdul = self.hdul[self.SF_dex[i]][0].header
header = self.hdul[self.SF_dex[i]][0].header['DATE-OBS']
fits.writeto('{}SF_{}.fits'.format(self.path, header), SF[0], SF_hdul, overwrite=True)
# Only use first image if routine is running furter:
SF = SF[0]
#-----------------------------
# LOAD RV AMPLITUDE OF OBJECT:
#-----------------------------
file_object = glob.glob('{}SF*'.format(self.path))
hdul_object = fits.open(str(file_object[0]))
self.rv_amp = hdul_object[0].header['OBJ-RV'] # [km/s] CDS RV amplitude (float)
#-----------------------------------------------------------
if plot==1: pt.plot_image_reduction(BF, DF, FF, TA, SF)
#-----------------------------------------------------------
# Select spectral region of interest:
self.BF = BF; self.DF = DF; self.FF = FF; self.TA = TA
self.F_calib = FF[self.cross_cut[0]:self.cross_cut[1], :].T
self.T_calib = TA[self.cross_cut[0]:self.cross_cut[1], :].T
self.S_calib = SF[self.cross_cut[0]:self.cross_cut[1], :].T
self.noise = np.sqrt(np.mean(BF**2))
#-----------------------------------------------------------
return self.S_calib, self.F_calib, self.T_calib
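
    # Usage sketch (illustrative; the path and image prefix are placeholders):
    #   song = BlueSONG('/data/2018-10-01/', 's1_')
    #   S, F, T = song.image_reduction()
    #   traces  = song.trace_orders()
    #   mask    = song.inter_order_mask()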
########################################################################################################
# FIND ORDERS #
########################################################################################################
def trace_orders(self, data=None, smooth_win=10, exclude_border=10, min_order_width=40, \
threshold_abs=0, disp_gap_tol=5, num_orders=5, num_peaks=10, plot=0):
"""
        This function finds the orders in an eShel spectrum by tracing the maximum light distribution along
        each order. First the function finds a central order position and uses this as a reference. Next the
        function finds the ridges of the specified number of orders 'num_orders' using the skfeature package.
        Lastly, each order is described by a fifth-order polynomial and returned as output.
----------------------------
INPUT :
----------------------------
data (array): A single image
smooth_win (float): Smooth value to enhance orders
        exclude_border  (float): Border edges that should be excluded
        min_order_width (float): Minimum distance used to locate the orders
threshold_abs (float): Threshold used to locate peaks with skfeature
disp_gap_tol (float): Tolerance for how big a gap there may be
num_orders (float): User specified number of orders the program should find
num_peaks (float): Number of peaks found for each bin
----------------------------
OUTPUT :
----------------------------
        order_traced    (dict): Keys 'order_x' mapping to the fitted polynomial coefficients of each order
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if data is None: data = self.F_calib
#----------------------------------
# FIND CENTRAL REFERENCE POSITIONS:
#----------------------------------
# Central position interval
ref_int = [self.cen_disp-5, self.cen_disp+6]
ref_cen_pos = self.find_ref_cen_pos(data, ref_int, smooth_win, exclude_border, min_order_width,\
threshold_abs, num_orders, plot)
#------------------------
# TRACE THE ORDER RIDGES:
#------------------------
ridge_pos_cross, ridge_pos_disp = self.find_order_ridges(data, smooth_win, exclude_border,\
min_order_width, threshold_abs, num_peaks)
#------------------------------------
# FILL IN DATA INTO THE FOUND RIDGES:
#------------------------------------
# Make dict placeholders:
order_traced = {}
order_trace = {}
for i, order_pos in enumerate(np.sort(ref_cen_pos)[::-1]):
# Here "order_pos" is the cross dispersion center value. order_pos[0] simply chooses one
# value and not the increasing list within the loop.
# Using ridges trace each order in each direction:
min_order_width = 10
order_trace_cross, order_trace_disp = self.find_order_outliers(self.cen_disp, order_pos[0],\
ridge_pos_disp, ridge_pos_cross,\
min_order_width, disp_gap_tol)
# Fit ridges with polynomial:
poly_coefs = np.polyfit(order_trace_disp, order_trace_cross, 5)
order_traced['order_{}'.format(i)] = poly_coefs
order_trace['order_{}'.format(i)] = [order_trace_disp, order_trace_cross]
#-----------------------------------------------------------------------------
if plot==1:
pt.plot_trace_order(ridge_pos_disp, ridge_pos_cross, order_trace, order_traced, \
order_trace_disp, self.cen_disp, ref_cen_pos)
#-----------------------------------------------------------------------------
self.ref_cen_pos = ref_cen_pos
self.trace = order_traced
#-----------------------------------------------------------------------------
return order_traced
def find_ref_cen_pos(self, data, ref_int, smooth_win, exclude_border, min_distance, threshold_abs, \
num_peaks, plot):
"""
This function finds the center order position used as a reference.
"""
# Collapse in disp direction to reduce cosmic ray contamination:
# (FIXME done to make this robust against cosmics - maybe it is not needed)
center_rows_median = np.median(data[ref_int[0]:ref_int[1], :], axis=0)
# Smooth cross_dispersion direction to prepare for the peak-detection algorithm:
center_row_median_convolved = bottleneck.move_sum(center_rows_median.astype(np.float), \
smooth_win, min_count=1)
# Find orders using a peak detection function from scikit-image:
order_centres = skfeature.peak_local_max(center_row_median_convolved, \
exclude_border=exclude_border,\
min_distance=min_distance, threshold_rel=0,\
threshold_abs=threshold_abs, num_peaks=num_peaks)
# Peaks detected minus the smooth window applied (simply due to the moving sum of bottleneck):
ref_cen_pos = order_centres - int(smooth_win/2)
#------------------------------------------------------------------------------
if plot==1:
pt.plot_find_ref_cen_pos(center_rows_median, center_row_median_convolved, \
self.len_cross, smooth_win, ref_cen_pos)
#------------------------------------------------------------------------------
return ref_cen_pos
def find_order_ridges(self, data, smooth_win, exclude_border, min_distance, threshold_abs, num_peaks):
"""
        This function finds the ridge of each order. It does so by taking a slice in cross dispersion,
        convolving it with a smoothing filter such as "bottleneck.move_sum", and then finding the local
        maxima for each slice and saving their positions.
"""
# Placeholders:
ridge_indices_disp = []
ridge_indices_cross = []
# Loop over the dispersion length (i) and the cross order row:
for i, crossorder in enumerate(data):
# Collapse in dispersion axis:
# TODO should smoothing be handled separately?
top_hat_conv = bottleneck.move_sum(crossorder.astype(np.float), smooth_win, min_count=1)
# Again find the peaks as done in "find_ref_cen_pos":
peaks = skfeature.peak_local_max(top_hat_conv, exclude_border=exclude_border,\
min_distance=min_distance, threshold_rel=0,\
threshold_abs=threshold_abs, indices=True, num_peaks=num_peaks)
# Convert peaks to a list covering the ridges:
peaks -= int(smooth_win/2)
ridge_indices_cross = np.append(ridge_indices_cross, peaks)
ridge_indices_disp = np.append(ridge_indices_disp, np.ones(peaks.shape[0]) * i)
#-----------------------------------------------------
return ridge_indices_cross, ridge_indices_disp
def find_order_outliers(self, cen_disp, ref_cen_cross, all_orders_x, all_orders_y, order_width,\
disp_gap_tol):
"""
        This utility takes the found reference positions in cross dispersion and the traced ridges, and
        locates all the trace points that fall within the 'order_width' threshold. Note that if center_row
        is not an integer this will fail!
"""
# To simplify the code we make some abbreviations:
x = np.unique(all_orders_x)
y_last = ref_cen_cross
x_last = x[cen_disp]
cross_gap_tol = int(order_width/2.)
# Placeholders for outliers:
cross = []
disp = []
# Outliers on the left side of cen_disp:
for xi in x[cen_disp:]:
index_xi = all_orders_x == xi
orders_y = all_orders_y[index_xi]
min_dist_index = np.argmin(np.abs(orders_y - y_last))
new_y_pos = orders_y[min_dist_index]
if (np.abs(new_y_pos - y_last) < cross_gap_tol) & (np.abs(xi - x_last) < disp_gap_tol):
cross.append(new_y_pos)
y_last = cross[-1]
disp.append(xi)
x_last = disp[-1]
y_last = ref_cen_cross
x_last = x[cen_disp]
# Outliers on the right side of cen_disp:
for xi in x[cen_disp-1::-1]:
index_xi = all_orders_x == xi
orders_y = all_orders_y[index_xi]
min_dist_index = np.argmin(np.abs(orders_y - y_last))
new_y_pos = orders_y[min_dist_index]
if (np.abs(new_y_pos - y_last) < cross_gap_tol) & (np.abs(xi - x_last) < disp_gap_tol):
cross.append(new_y_pos)
y_last = cross[-1]
disp.append(xi)
x_last = disp[-1]
index = np.argsort(disp)
#---------------------------------------------------
return np.array(cross)[index], np.array(disp)[index]
########################################################################################################
# INTER-ORDER MASK #
########################################################################################################
def inter_order_mask(self, data=None, order_traces=None, order_width=None, \
low_nudge=0, high_nudge=0, plot=0):
"""
        This function is used to determine the background flux which will be used to correct for scattered
        light, vignetting, etc. The function looks at the flux level in between the orders ("inter-order")
        and makes and returns a mask with ones where pixels are inter-order and zeros elsewhere. The
        function uses the result from the previous subroutine "trace_orders".
----------------------------
INPUT :
----------------------------
        order_traces       (dict): Traced orders found from the function 'trace_orders'
        order_width  (int, float): Width of the orders used to construct the mask
low_nudge (int, float): Number of pixels used below the traced orders
high_nudge (int, float): Number of pixels used above the traced orders
plot (int, float): Plot result if you like
----------------------------
OUTPUT :
----------------------------
        inter_order_mask (2d array): Mask with ones between the orders and zeros on the orders
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if data is None: data = self.F_calib
        if order_traces is None: order_traces = self.trace
        if order_width is None: order_width = self.find_optimal_width(plot=plot) # FUNCTION CALL!
# Check if the inter-order width is odd integer:
inter_order_width = int(order_width * 4/3)
if inter_order_width % 2 == 0: inter_order_width = inter_order_width - 1
# Constants and placeholders:
inter_order_mask = data * 0 + 1 # Initial image mask of ones
disp = np.arange(self.len_disp) # Number pixel interval in dispersion
order_no = sorted(order_traces.keys()) # Orders numbers (string)
cross_order_center = []
#-----------------------
# FIND ALL INTER-ORDERS:
#-----------------------
# First loop through each order:
for order in order_no:
# Get the coefficients from the trace function:
coefs = order_traces[order]
cross_order_position = np.polyval(coefs, disp) # Polyfit to each order
cross_order_center = np.append(cross_order_center, cross_order_position[int(self.len_disp/2)])
# Each inter order is found:
for disp_i in range(self.len_disp):
lower_order_edge =int(np.round(cross_order_position[disp_i]-inter_order_width/2-low_nudge))
upper_order_edge =int(np.round(cross_order_position[disp_i]+inter_order_width/2+high_nudge))
inter_order_mask[int(disp_i), lower_order_edge:upper_order_edge] = 0
# Distance/size of each inter order:
inter_order_size = cross_order_center[1:] - cross_order_center[:-1] - inter_order_width \
- low_nudge - high_nudge
#-----------------------
# REMOVE 'GHOST' ORDERS:
#-----------------------
# Predict inter_order_size:
xx = np.arange(len(cross_order_center)-1)
inter_order_size_fit = np.polyfit(xx, inter_order_size, 2)
size_before = np.polyval(inter_order_size_fit, -1)
size_after = np.polyval(inter_order_size_fit, len(cross_order_center))
# Remove 'ghost orders' before first order:
coefs = order_traces[order_no[0]]
cross_order_position = np.polyval(coefs, disp)
for disp_i in range(self.len_disp):
lower_inter_order_edge = np.round(cross_order_position[disp_i] - inter_order_width/2 \
- low_nudge - size_before).astype(int)
# Remove orders below edges:
if lower_inter_order_edge < 0: lower_inter_order_edge = 0
inter_order_mask[disp_i, :lower_inter_order_edge] = 0
# Remove 'ghost orders' after last order:
coefs = order_traces[order_no[-1]]
cross_order_position = np.polyval(coefs, disp)
for disp_i in range(self.len_disp):
upper_inter_order_edge = np.round(cross_order_position[disp_i] + inter_order_width/2 \
+ high_nudge + size_after).astype(int)
# Remove orders above edges:
            if upper_inter_order_edge > self.len_cross + 50: upper_inter_order_edge = self.len_cross
inter_order_mask[disp_i, upper_inter_order_edge:] = 0
#--------------------------------------------------------------
if plot==1: pt.plot_inter_order_mask(data, inter_order_mask)
#--------------------------------------------------------------
self.inter_order_width = inter_order_width
self.inter_order_mask = inter_order_mask
#--------------------------------------------------------------
return self.inter_order_mask
########################################################################################################
# BACKGROUND IMAGE #
########################################################################################################
def background(self, image, inter_order_mask=None, order_ref=None, \
poly_order_y=2, poly_order_x=4, filter_size=5, plot=0):
"""
        This function estimates the background flux of scattered light and subtracts it. It uses the
        inter_order_mask to select the inter-order pixels that are fitted.
        ----------------------------
        INPUT          :
        ----------------------------
        image            (2d array)  : Image to be background corrected
        inter_order_mask (2d array)  : Background mask with ones in the inter-order regions
        poly_order_y     (int, float): Order of the polynomial fitted to the background in cross dispersion
        poly_order_x     (int, float): Order of the polynomial fitted to the background in dispersion
        filter_size      (int, float): Half-width (in rows) of the slice averaged for each cross-dispersion cut
        ----------------------------
        OUTPUT         :
        ----------------------------
        corrected_image  (2d array)  : Background-subtracted image
        background_image (2d array)  : Fitted background image
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if inter_order_mask is None: inter_order_mask = self.inter_order_mask
#----------------------------
# CONSTANTS AND PLACEHOLDERS:
#----------------------------
# Create a background image:
(ysize, xsize) = image.shape
background_image = np.zeros((ysize, xsize), dtype=np.float64)
# Data size in arange:
xx = np.arange(xsize, dtype=np.float64)
yy = np.arange(ysize, dtype=np.float64)
        # Array to hold fitted y values:
xfitarr = np.zeros((len(yy), xsize), dtype=np.float64)
# Step size and range:
yvals = np.arange(len(yy))
# Constants:
ycount = 0
niter = 0
sigma_limit = 3
# For plots:
s_disp = [500, 1500, int(yvals[-1])] # Slices in disp
s_cros = [50, 200, int(xx[-1])] # Slices in cross
#----------------------------
# FIT IN Y-DIRECTION (CROSS):
#----------------------------
for i in yvals:
# Cut out slice in cross dispersion with width determined by 'filter_size':
ymin_ind = np.max([i - filter_size, 0])
ymax_ind = np.min([i + filter_size, ysize-1])
y_slice = image[ymin_ind:ymax_ind, :]
# Collapse in dispersion to a single cross row:
y_mean = np.mean(y_slice, axis=0)
# Indices/image of inter-order mask in cross row:
y_image = np.where(inter_order_mask[i, :] == 1)[0]
            # Perform fitting with sigma-clipping:
            niter = 0
            while 1:
# Make polynomial fit:
coefs = np.polyfit(y_image, y_mean[y_image], poly_order_y)
xfit = np.polyval(coefs, y_image)
# Find sigma:
sigma = (y_mean[y_image] - xfit) / np.std(y_mean[y_image] - xfit)
rejected = np.extract(sigma > sigma_limit, y_image)
y_image = np.extract(sigma < sigma_limit, y_image)
                # Loop until all points are within sigma or niter is reached:
niter = niter + 1
if niter == 5 or rejected.size == 0:
break
# Final polynomial fit:
xfit = np.polyval(coefs, xx) # fitted line
xfitarr[ycount, :] = xfit # Array with fit constants for each slice
ycount = ycount + 1
# Save values for plotting:
if i==s_disp[0]: yi0, ym0, yfit0 = y_image, y_mean[y_image], xfit
if i==s_disp[1]: yi1, ym1, yfit1 = y_image, y_mean[y_image], xfit
if i==s_disp[2]: yi2, ym2, yfit2 = y_image, y_mean[y_image], xfit
#---------------------------
# FIT IN X-DIRECTION (DISP):
#---------------------------
        for i in np.arange(xsize):
            # Reset accepted rows for each column:
            goodind = np.arange(len(yy))
            # Perform fitting with sigma-clipping:
            niter = 0
            while 1:
# Make polynomial fit:
coefs = np.polyfit(yvals.take(goodind), xfitarr[goodind, i], poly_order_x)
yfit = np.polyval(coefs, yvals[goodind])
# Find sigma:
sigma = (xfitarr[goodind, i] - yfit) / np.std(xfitarr[goodind, i] - yfit)
rejected = np.extract(sigma > sigma_limit, goodind)
goodind = np.extract(sigma < sigma_limit, goodind)
                # Loop until all points are within sigma or niter is reached:
niter = niter + 1
if niter == 3 or rejected.size == 0 or goodind.size == 0:
break
            # In case sigma clipping rejected every point (poor-quality image), fall back to all rows:
            if goodind.size == 0:
                print("Error: no points left when y-fitting the background")
                coefs = np.polyfit(yvals, xfitarr[:, i], poly_order_x)
# Final background image is constructed:
background_image[:, i] = np.polyval(coefs, yy)
# Save values for plotting:
if i==s_cros[0]: xi0, xm0, xfit0 = yvals[goodind], xfitarr[goodind, i], background_image[:,i]
if i==s_cros[1]: xi1, xm1, xfit1 = yvals[goodind], xfitarr[goodind, i], background_image[:,i]
if i==s_cros[2]: xi2, xm2, xfit2 = yvals[goodind], xfitarr[goodind, i], background_image[:,i]
#---------------------
# SUBTRACT BACKGROUND:
#---------------------
corrected_image = image - background_image
#--------------------------------------------------------------
        if plot == 1:
pt.plot_background_fits(s_disp, s_cros, poly_order_y, poly_order_x, \
xx, yi0, yi1, yi2, ym0, ym1, ym2, yfit0, yfit1, yfit2, \
yy, xi0, xi1, xi2, xm0, xm1, xm2, xfit0, xfit1, xfit2)
pt.plot_background(background_image)
#--------------------------------------------------------------
return corrected_image, background_image
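    # Example usage (hedged sketch; 'S_calib' denotes a bias/dark/flat-calibrated
    # stellar image produced by earlier reduction steps not shown here):
    #   mask = self.inter_order_mask(plot=0)
    #   S_corrected, S_background = self.background(S_calib, inter_order_mask=mask)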
########################################################################################################
# EXTRACT SPECTRUM #
########################################################################################################
def spectral_extraction(self, S, F, T, trace=None, order_width=None, plot=0):
"""
        This function uses the 'order_width' estimated earlier and first cuts out the spectrum in question
        using the utility 'cut_out_order'. All orders of relevance are cut out, and a simple sum over the
        spatial profile is used to get the 1D spectrum. Next, clear cosmic hits are removed using the
        utility 'locate_outliers', and finally the normalized flat blaze from each order is used to
        de-blaze each spectral order.
----------------------------
INPUT :
----------------------------
S (2d array): Stellar spectrum
F (2d array): Flat spectrum
T (2d array): ThAr arc spectrum
        trace       (dict)    : Polynomial fits to all the traced orders
order_width (int): Spatial order width for cutting out the order
----------------------------
OUTPUT :
----------------------------
s_deblaze (1d array): De-blazed 1D stellar spectral orders
T_orders (2d array): ThAr arc image orders
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if trace is None: trace = self.trace
        if order_width is None: order_width = self.order_width
#----------------------------------------
# FIRST ITERATION WITH LINEAR EXTRACTION:
#----------------------------------------
        # Make sure that the order width is an odd number:
        if order_width % 2 == 0: order_width = order_width - 1
        # Cut out orders with spatial size (order numbers are counted bottom-up):
S_orders = [self.cut_out_order(S, np.polyval(trace['order_{}'.format(self.orders[i])], self.disp), \
order_width) for i in range(self.n_orders)]
F_orders = [self.cut_out_order(F, np.polyval(trace['order_{}'.format(self.orders[i])], self.disp), \
order_width) for i in range(self.n_orders)]
T_orders = [self.cut_out_order(T, np.polyval(trace['order_{}'.format(self.orders[i])], self.disp), \
order_width) for i in range(self.n_orders)]
# Linear extraction object and blaze:
s_orders = [S_orders[i].sum(axis=1) for i in range(self.n_orders)]
f_orders = [F_orders[i].sum(axis=1) for i in range(self.n_orders)]
#------------------------------
# HEREAFTER OPTIMAL EXTRACTION:
#------------------------------
# Initial variance image:
#V = V0 + np.abs(S_calib, axis=0)/Q
# # Cut out orders with spatial size 5*FWHM:
# S = self.cut_out_order(np.polyval(trace['order_2'], self.disp), S_calib)
# # Find extracted spectrum:
# s = np.zeros(np.shape(s))
# for i in range(self.len_disp):
# # Variance image:
# V = self.V0 + np.abs(s*P+S_sky, axis=0)/self.Q
# # Linear image:
# s[i] = np.sum((P*S_sky/V)/(P**2/V), axis=1)
#--------------------------------------------------------------
#if plot==1: pt.plot_optimal_extraction(S_orders[1][:, 900:950].T)
#--------------------------------------------------------------
self.S_orders = S_orders; self.F_orders = F_orders; self.T_orders = T_orders
self.s_orders = s_orders; self.f_orders = f_orders
#--------------------------------------------------------------
return [self.S_orders, self.F_orders, self.T_orders]
def cut_out_order(self, image, traced_order, cross_order_width=21):
"""
        This utility takes the polynomial description 'traced_order' of the relevant
        order and a spectrum, and cuts out the spectrum over the full dispersion length
        and 'cross_order_width' pixels in cross dispersion around this spectral order.
        It then returns the bandpass image 'order_cut'. 'cross_order_width' needs to be
        an odd number.
"""
        # Constants and placeholders:
half_width = int(cross_order_width/2.)
order_cut = np.zeros((self.len_disp, cross_order_width))
cross_order_positions = np.zeros((self.len_disp, cross_order_width))
# This loop cuts out the order:
for d in np.arange(self.len_disp):
position = traced_order[d]
rounded_position = int(np.round(position))
# Fill in the columns of the order:
cp = image[d, rounded_position - half_width:rounded_position + half_width + 1]
order_cut[d,:] = cp
# Fill in the cross order position:
x = np.arange(-half_width, half_width + 1) + rounded_position
cross_order_positions[d, :] = x
#--------------------------------------------------------------
return order_cut
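    # Example (hedged sketch): cutting out order #2 around its trace and doing a
    # simple-sum extraction, assuming 'self.trace' and 'self.disp' are already set:
    #   trace_fn = np.polyval(self.trace['order_2'], self.disp)
    #   band = self.cut_out_order(image, trace_fn, cross_order_width=21)
    #   spectrum_1d = band.sum(axis=1)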
########################################################################################################
# WAVELENGTH CALIBRATION #
########################################################################################################
def wavelength_calib(self, T_orders=None, poly_order=3, plot=0):
"""
        This utility performs the wavelength calibration. This is done first using the Ca II lines as an
        initial reference of the wavelength scale. Thus, the utility 'calcium_lines_identifier' finds the
        peak values of the H and K lines, then 'peak_finder' finds all ThAr lines in the order above a
        certain threshold, and lastly a FIES ThAr atlas is used to set the real wavelength scale of the
        order, which is returned as output. The terminology here is that (p) is pixel wavelength, (l) is
        wavelength in Å, and (x) is the spatial direction.
----------------------------
INPUT :
----------------------------
T_orders (2d array): Extracted ThAr image of each order
poly_order (int): Order of polynomial function for fitting wavelength relation
----------------------------
OUTPUT :
----------------------------
[l0, l1] (1d arrays): New wavelength scale for each order
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if T_orders is None: T_orders = self.T_orders
#-------------------------
# FIND OBSERVED ARC LINES:
#-------------------------
        # Identify lines from different sigma levels:
        COF, radii = [], []
        for i in range(self.n_orders):
            COF_i, radii_i, _ = self.peak_finder(T_orders[i], sigma=0.5, plot=0)
            COF.append(COF_i)
            radii.append(radii_i)
# Only keep disp values:
l_cof0 = COF[0][:,0]
l_cof1 = COF[1][:,0]
#---------------------------------------------------
# ITER 1: USE KNOWN FIES LINES AS INITIAL REFERENCE: TODO! automate the first iteration
#---------------------------------------------------
        # Calcium lines:
l_ca = [self.K, self.H]
# Load FIES arc atlas:
l_fies0 = [3868.5284, 3873.8224, 3916.4176, 3925.7188, 3928.6233, 3948.9789]
l_fies1 = [3925.7188, 3928.6233, 3948.9789, 3950.3951, 3979.3559, 4013.8566]
        # Pixel coordinates of known identified FIES peaks:
l_pix0 = np.array([1091, 1190, 2025, 2217, 2279, 2723])
l_pix1 = np.array([ 900, 952, 1326, 1352, 1910, 2624])
# Find COF lines closest to known pixel coordinates:
l_cof0_ini = [min(l_cof0, key=lambda x:abs(x-l_pix0[i])) for i in range(len(l_pix0))]
l_cof1_ini = [min(l_cof1, key=lambda x:abs(x-l_pix1[i])) for i in range(len(l_pix1))]
        # Find wavelength relations:
r0_1 = self.find_arc_scale(self.disp, l_cof0_ini, l_fies0, poly_order, l_cof0, \
param=['1. ITER: 58', T_orders[0], COF[0], radii[0]], plot=plot)
r1_1 = self.find_arc_scale(self.disp, l_cof1_ini, l_fies1, poly_order, l_cof1, \
param=['1. ITER: 57', T_orders[1], COF[1], radii[1]], plot=plot)
l0_ini, l_cof0_ini, res0_ini, l_cof0_all = r0_1[0], r0_1[1], r0_1[2], r0_1[3]
l1_ini, l_cof1_ini, res1_ini, l_cof1_all = r1_1[0], r1_1[1], r1_1[2], r1_1[3]
#--------------------------------------
# ITER 2: CALIBRATE WITH PHOTRON ATLAS:
#--------------------------------------
# Load Photron arc atlas (http://iraf.noao.edu/specatlas/thar_photron/thar_photron.html):
thar_atlas_phot = np.loadtxt('dependencies/thar_atlas_photron.txt')
l_atlas_phot = thar_atlas_phot[:,2]
        # Find wavelength relations:
r0_2 = self.find_arc_scale(l0_ini, l_cof0_all, l_atlas_phot, poly_order, threshold=0.02, \
param=['2. ITER: 58', T_orders[0], COF[0], radii[0]], plot=plot)
r1_2 = self.find_arc_scale(l1_ini, l_cof1_all, l_atlas_phot, poly_order, threshold=0.02, \
param=['2. ITER: 57', T_orders[1], COF[1], radii[1]], plot=plot)
l0, l_cof0, std0 = r0_2[0], r0_2[1], r0_2[2]
l1, l_cof1, std1 = r1_2[0], r1_2[1], r1_2[2]
#--------------------------------------------------------------
if plot==1: pt.plot_arc_check([l0, l1], T_orders, l_ca, 'FINAL RESULT')
#--------------------------------------------------------------
self.l_orders = [l0, l1]
self.sigma_w = [std0, std1]
#--------------------------------------------------------------
return self.l_orders
def find_arc_scale(self, l_scale, l_obs, l_atlas, poly_order, l_all=None, \
threshold=None, param=None, plot=None):
"""
        This utility takes observed and atlas arc lines and finds a wavelength solution given a threshold
        for deciding when the lines are close enough to be identified as a match. Thus, this utility works
        only if given a fair initial wavelength solution for the observed lines. If no threshold is given,
        it will be assumed that this is the initial step going from pixel to wavelength space, where the
        exact coordinate matches are known.
"""
if threshold is not None:
# Find atlas lines closest to observed lines (COF lines):
l_atlas_match = [min(l_atlas, key=lambda x:abs(x-l_obs[i])) for i in range(len(l_obs))]
            # Find value difference between matched lines and keep only if diff < threshold:
dex_goodin = np.where(abs(l_obs - l_atlas_match) < threshold)[0]
l_atlas_good = [l_atlas_match[dex_goodin[i]] for i in range(len(dex_goodin))]
l_obs_good = [l_obs[dex_goodin[i]] for i in range(len(dex_goodin))]
else:
l_obs_good = l_obs.copy()
l_atlas_good = l_atlas.copy()
# Find new pixel-wavelength relation:
coefs = np.polyfit(l_obs_good, l_atlas_good, poly_order)
# Copy solution to scale and observed lines:
ipoly = np.arange(poly_order+1)[::-1]
l_scale_new = np.sum([coefs[i]*l_scale**ipoly[i] for i in range(poly_order+1)], axis=0)
l_obs_new = np.sum([coefs[i]*l_obs**ipoly[i] for i in range(poly_order+1)], axis=0)
if l_all is not None:
l_all_new = np.sum([coefs[i]*l_all**ipoly[i] for i in range(poly_order+1)], axis=0)
else: l_all_new = None
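        # (each np.sum(...) above is just a polynomial evaluation, i.e. equivalent to
        #  np.polyval(coefs, l_scale), np.polyval(coefs, l_obs), np.polyval(coefs, l_all))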
# Calculate fit parameters:
poly = np.poly1d(coefs)
        xp = np.linspace(min(l_obs_good), max(l_obs_good), int(1e3))
residuals = poly(l_obs_good)-l_atlas_good
        chi2r = np.sum(residuals**2)/(len(l_obs_good)-(poly_order+1)) # Reduced chi-square (unit weights)
sigma = np.std(residuals)
#--------------------------------------------------------------
        if plot == 1:
# Activate only illustration and save:
#pt.plot_arc_illustration(l_obs, l_atlas, l_obs_good, l_atlas_good, l_scale, param)
pt.plot_arc_fit(l_obs_good, l_atlas_good, coefs, poly_order, residuals, chi2r, sigma, param[0])
pt.plot_arc_scale(l_obs, l_atlas, l_obs_good, l_atlas_good, l_scale, param)
#--------------------------------------------------------------
return l_scale_new, l_obs_new, sigma, l_all_new
########################################################################################################
# DE-BLAZING #
########################################################################################################
def deblazing(self, F_orders=None, l_orders=None, f_orders=None, s_orders=None, plot=0):
"""
        This function de-blazes each spectral order: clear cosmic hits are first removed from the flat
        blaze using the utility 'locate_outliers', and each stellar order is then divided by the cleaned
        flat blaze. Lastly, the flat blaze is scaled to the stellar blaze for later use.
        ----------------------------
        INPUT          :
        ----------------------------
        F_orders (2d arrays): Flat image orders
        l_orders (1d arrays): Wavelength scale of each order
        f_orders (1d arrays): Flat blaze of each order
        s_orders (1d arrays): Stellar spectrum of each order
        ----------------------------
        OUTPUT         :
        ----------------------------
        f_orders (1d arrays): Cosmic-corrected flat blaze of each order
        s_deblaz (1d arrays): De-blazed 1D stellar spectral orders
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if F_orders is None: F_orders = self.F_orders
        if l_orders is None: l_orders = self.l_orders
        if f_orders is None: f_orders = self.f_orders
        if s_orders is None: s_orders = self.s_orders
#----------------------
        # CORRECT FOR COSMICS: TODO! If using optimal extraction this can be fixed at once
        #----------------------
        # Does not work for arc images as it is a peak-detection algorithm:
f_lincor = [self.locate_outliers(f_orders[i], convolve_step=3, cutoff=2e-2, plot=0) \
for i in range(self.n_orders)]
#----------------------------
# CORRECT FOR BLAZE FUNCTION:
#----------------------------
# Perform blaze correction:
s_deblaz = [(s_orders[i]/f_lincor[i]) for i in range(self.n_orders)]
#-----------------------------------
        # SCALE FLAT BLAZE TO STELLAR BLAZE:
#-----------------------------------
# Find maximum of each blaze:
dex_blaze_max = [np.nanargmax(self.f_orders[i]) for i in range(self.n_orders)]
# Remove all cosmics from spectra to be used only for scaling:
s_coscor = [self.locate_outliers(s_orders[i], convolve_step=3, cutoff=1e-1, plot=0) \
for i in range(self.n_orders)]
# With cosmics removed now scale to maximum difference:
continuum_cor = 0.85 # Correction factor only valid for solar type stars
dif_max = [continuum_cor*np.max(self.f_orders[i])/np.max(s_coscor[i]) for i in range(self.n_orders)]
#--------------------------------------------------------------
if plot==1:
pt.plot_blaze(s_orders, f_orders, f_lincor, dif_max)
pt.plot_deblaze(s_deblaz)
#--------------------------------------------------------------
self.f_orders = f_lincor; self.s_deblaz = s_deblaz
self.dif_max = dif_max
#--------------------------------------------------------------
return self.f_orders, self.s_deblaz
def norm_blaze_function(self, F_order): #TODO! this is not used but may be useful in future
"""
        This utility finds the blaze function which arises from the fact that an échelle spectrum is
        bowed along the dispersion direction. To find the blaze function here, the order is collapsed
        in cross dispersion to a one-dimensional array using a simple sum. The 'normalized_order' also
        gives an estimate of the scatter within the order.
"""
# Use simple sum to collapse order:
f_blaze = np.sum(F_order, axis=1)
# Normalize the spectrum:
F_norm_order = np.zeros(F_order.shape)
for i in range(F_order.shape[1]):
F_norm_order[:, i] = F_order[:, i] / f_blaze
#--------------------------------------------------------------
return F_norm_order
#F_norm = [self.norm_blaze_function(F_orders[i]) for i in range(self.n_orders)]
#S_deblaze = [self.S_orders[i]/F_norm[i] for i in range(self.n_orders)]
#s_deblaz_norm = [S_deblaze[i].sum(axis=1) for i in range(self.n_orders)]
#####################################################################################################
# SCRUNCH, MERGE, AND CLIP #
#####################################################################################################
def scrunch_and_merge(self, l_orders=None, s_deblaz=None, plot=0):
"""
        This function interpolates ("scrunches") the de-blazed orders onto a common uniform wavelength
        grid and merges them into a single 1D spectrum.
        ----------------------------
        INPUT          :
        ----------------------------
        l_orders (1d arrays): Wavelength scale of each order
        s_deblaz (1d arrays): De-blazed 1D stellar spectral orders
        ----------------------------
        OUTPUT         :
        ----------------------------
        s, l     (1d arrays): Merged flux and wavelength arrays
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if l_orders is None: l_orders = self.l_orders
        if s_deblaz is None: s_deblaz = self.s_deblaz
#----------------
# SCRUNCH ORDERS:
#----------------
# Prepare uniform wavelength grid:
dl = np.median([np.median(np.diff(l_orders[i])) for i in range(self.n_orders)])
dl_orders = [np.arange(l_orders[i][0], l_orders[i][-1]+dl, dl) for i in range(self.n_orders)]
# Linear interpolate to uniform grid:
s_grids = [scipy.interpolate.griddata(l_orders[i], s_deblaz[i], dl_orders[i], method='nearest') \
for i in range(self.n_orders)]
#--------------
# MERGE ORDERS:
#--------------
        # Find index of merge borders:
dex0_min = np.where(dl_orders[0].astype(int)==3880)[0][0]
dex0_max = np.where(dl_orders[0].astype(int)==3915)[0][0]
dex1_min = np.where(dl_orders[1].astype(int)==3915)[0][0]
# Merge wavelength axis:
l_merge0 = dl_orders[0][dex0_min:dex0_max]
l_merge1 = dl_orders[1][dex1_min::]
l_merge = np.append(l_merge0, l_merge1)
# Merge flux axis:
s_merge0 = s_grids[0][dex0_min:dex0_max]
s_merge1 = s_grids[1][dex1_min::]
s_merge = np.append(s_merge0, s_merge1)
#--------------------------------------------------------------
        if plot == 1: pt.plot_merge(s_merge, l_merge, [self.H, self.K])
#--------------------------------------------------------------
self.s = s_merge; self.l = l_merge
#--------------------------------------------------------------
return self.s, self.l
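    # Note (hedged): for 1D data, scipy.interpolate.griddata(..., method='nearest')
    # simply snaps each output grid point to the nearest sampled wavelength; a linear
    # alternative per order would be, e.g. (names illustrative):
    #   s_grid = np.interp(dl_order, l_order, s_deblaz_order)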
#####################################################################################################
# RV CORRECTION #
#####################################################################################################
def rv_correction(self, s=None, l=None, plot=0):
"""
        This function corrects the wavelength scale for the radial velocity (RV) of the target. A
        Cross-Correlation (CC) against a reference spectrum, using the subroutine 'cc_coefficient',
        is sketched in the commented-out code below; for the precision needed here, further tests of
        that RV correction are needed. For now Astropy is used to find the projected RV component,
        including the stellar motion, the barycentric motion, and Earth's heliocentric and rotational
        velocities.
        ----------------------------
        INPUT          :
        ----------------------------
        s (1d array): Merged stellar spectrum
        l (1d array): Merged wavelength scale
        ----------------------------
        OUTPUT         :
        ----------------------------
        l (1d array): RV-corrected wavelength scale
"""
        # Check if program parameters are defined:
        if s is None: s = self.s
        if l is None: l = self.l
#---------------
# RV CORRECTION:
#---------------
# Use astropy for Barycentric:
obstime = Time(self.datetime)
target = SkyCoord.from_name(self.target)
song = EarthLocation.of_site('roque de los muchachos') # Closest observatory to Teide
rv_baryc = target.radial_velocity_correction(obstime=obstime, location=song).to('km/s').value
        # Use barycentric + stellar RV amplitude as RV correction:
rv_shift = self.rv_amp - rv_baryc - self.rv_amp * rv_baryc / (scipy.constants.c/1e3)
# Use standard equation for RV shift (dl/l = v/c) calculated in [km/s]:
c = scipy.constants.c/1e3
delta_lambda = rv_shift / c * l
        # Perform wavelength shift:
l = l - delta_lambda
# Calculate this for approx results:
delta_l3950 = rv_shift / (scipy.constants.c/1e3) * 3950
delta_p3950 = delta_l3950 / np.diff(l)[0]
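        # (sanity check: a 30 km/s shift at 3950 Å gives delta_lambda ~ 30/3e5 * 3950 ~ 0.4 Å)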
#---------------------------
# HANDLING IDL SUN SPECTRUM:
#---------------------------
# # Save IDL format to python:
# import scipy.io
# s_sun = scipy.io.readsav('{}sun_reference/ARCTURUS.IDL'.format(self.path_blues))
# np.savetxt('{}sun_reference/sun_python.txt'.format(self.path_blues), s_sun['atlas'])
# # Import sun spectrum and save smaller spectral domain:
# sun = np.loadtxt('{}/sun_reference/sun_python.txt'.format(self.path_blues))
# l_sun = np.array([sun[i][0] for i in range(len(sun))])
# s_sun = np.array([sun[i][2] for i in range(len(sun))])
# plt.figure()
# plt.plot(l_sun, s_sun, 'k-', linewidth=0.1)
# plt.show()
# # Find index of merge boarders:
# borders = [3880, 4020]
# i_min = np.where(l_sun.astype(int)==borders[0])[0][0]
# i_max = np.where(l_sun.astype(int)==borders[1])[0][0]
# # Merge wavelength axis and save data:
# l_sun = l_sun[i_min:i_max]
# s_sun = s_sun[i_min:i_max]
# np.savetxt('{}sun_reference/sun.txt'.format(self.path_blues), np.vstack([l_sun, s_sun]).T)
#-------------------------
# TRANSFORM AND NORMALIZE:
#-------------------------
# # Load sun data:
# sun = np.loadtxt('{}/sun_reference/sun.txt'.format(self.path_blues))
# l_sun = sun[:,0]
# s_sun = sun[:,1]
# # Tranform to regular grid:
# dl_sun = np.diff(l_sun)[0]
# l_gsun = np.arange(l_sun[0], l_sun[-1], dl_sun)
# # Interpolate:
# s_gsun = scipy.interpolate.griddata(l_sun, s_sun, l_gsun, method='cubic') # Sun
# s_gobs = scipy.interpolate.griddata(l, s, l_gsun, method='nearest') # Star observed
# # Flux normalize signals:
# s_gobs = s_gobs/np.max(s_gobs)
# # Inverted and Normalized signal MUST be used:
# x = (1-s_gobs) - np.mean(1-s_gobs)
# y = (1-s_gsun) - np.mean(1-s_gsun)
# #x = self.convolve(x, 'median', 1000)
# #y = self.convolve(y, 'median', 1000)
# print(np.diff(l_gsun))
# print(len(l_gsun))
# l_gsun_new = np.roll(l_gsun, 200)
# plt.figure()
# plt.plot(l_gsun, s_gsun, 'r-')
# plt.plot(l_gsun_new, s_gsun, 'b-')
# plt.plot(l_gsun, s_gobs, 'k-')
# plt.show()
#sys.exit()
#---------------------------
# PERFORM CROSS CORRELATION:
#---------------------------
# # Prepare indices for spectrum shift:
# dx = 200
# dy = np.arange(-dx, dx)
# # Perform Cross-correlation:
# cc = np.zeros(len(dy))
# for i in dy:
# y = np.roll(y, -i)
# r_cc = self.cc_coefficient(x, y)
# cc[i] = r_cc
# # Find peaks maximum:
# peaks_dex, _ = scipy.signal.find_peaks(cc)
# dy_peaks = dy[peaks_dex]
# cc_peaks = cc[peaks_dex]
# # Choose only resonable RV shifts (<200 km/s) and good cc (>0.8):
# peaks_good_dex = (dy_peaks>-200) * (dy_peaks<200) * (cc_peaks>0.8)
# dy_peaks = dy_peaks[peaks_good_dex]
# cc_peaks = cc_peaks[peaks_good_dex]
# Find max peak corresponding to RV:
# cc_max_dex = np.argmax(cc_peaks)
# dy_max = dy_peaks[cc_max_dex]
# cc_max = cc_peaks[cc_max_dex]
# plt.figure()
# plt.plot(dy, cc, 'k-')
# plt.plot(dy_peaks, cc_peaks, 'r+')
#plt.title('RV shift = {} km/s'.format(rv_mean))
#plt.axvline(max(cc), color='r')
#plt.plot(l_sun, y, 'k-')
#plt.plot(ll, x, 'b-')
#plt.xlim(-200, 200)
# plt.show()
#--------------------------------------------------------------
#if plot is 1: pt.plot_merge(s_merge, l_merge, [self.H, self.K])
#--------------------------------------------------------------
self.delta_v_baryc = rv_baryc
self.delta_v = rv_shift
self.delta_l = delta_l3950
self.delta_p = delta_p3950
self.l = l
#--------------------------------------------------------------
return self.l
def cc_coefficient(self, x, y):
"""
        This function finds the cross-correlation coefficient between two datasets. Here x is the data
        that has an offset, and y is the data that is cross-correlated for every small step.
"""
cor = np.sum( (x-np.mean(x)) * (y-np.mean(y)) )
        norm = np.sqrt( np.sum((x-np.mean(x))**2) * np.sum((y-np.mean(y))**2) )
r = cor/norm
#--------------------------------------------------------------
return r
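    # Sanity check (hedged): this is the Pearson correlation coefficient, so a
    # signal correlated with itself gives r = 1:
    #   x = np.sin(np.linspace(0, 2*np.pi, 100))
    #   r = self.cc_coefficient(x, x)   # -> 1.0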
########################################################################################################
# CONTINUUM NORMALIZATION #
########################################################################################################
def continuum_norm(self, l=None, s=None, rv_amp=0, plot=0):
"""
        This function normalizes the merged spectrum to a pseudo-continuum using three different
        methods: (1) single continuum points, (2) the highest peaks around those points, and (3) the
        median flux in small bandpasses around them. RV-corrected data must be used here.
        ----------------------------
        INPUT          :
        ----------------------------
        l (1d array): RV-corrected wavelength scale
        s (1d array): Merged stellar spectrum
        ----------------------------
        OUTPUT         :
        ----------------------------
        l (1d array)         : Wavelength scale
        s (list of 1d arrays): Continuum-normalized spectra, one per method
"""
#------------------------------
# CHECK FOR PROGRAM PARAMETERS:
#------------------------------
        if l is None: l = self.l
        if s is None: s = self.s
#----------------------------
# NORMALIZE WITH POINTS ONLY:
#----------------------------
# Pseudo-continuum points used for the SSS included in the Ca H & K order:
ps_min, ps_max = 3912, 4000
# Find central index for wavelength:
dex_min = np.where(l.astype(int)==ps_min)[0][0]
dex_max = np.where(l.astype(int)==ps_max)[0][0]
        # Values for the two points used for the linear relation:
        l_point = [l[dex_min], l[dex_max]]
        s_point = [s[dex_min], s[dex_max]]
        # Find linear relation:
        coefs_point = np.polyfit(l_point, s_point, 1)
poly_point = np.poly1d(coefs_point)
s_norm_point = s/poly_point(l)
#-----------------------------
# NORMALIZE WITH HIGHEST PEAK:
#-----------------------------
# Find max peak around pseudo peaks:
        # (This function needs an initial guess for the RV shift!)
dex_peak_min = self.find_peak_in_noise(s, dex_min, plot=0)
dex_peak_max = self.find_peak_in_noise(s, dex_max, plot=0)
# Values for fit:
l_peak = [l[dex_peak_min], l[dex_peak_max]]
s_peak = [s[dex_peak_min], s[dex_peak_max]]
# Find linear relation:
coefs_peak = np.polyfit(l_peak, s_peak, 1)
poly_peak = np.poly1d(coefs_peak)
s_norm_peak = s/poly_peak(l)
#------------------------------
# NORMALIZE WITH MEAN BANDPASS:
#------------------------------
        # Another method is to use pseudo-continuum bandpasses:
bold_band_min = (l > ps_min-0.5)*(l < ps_min+0.5)
bold_band_max = (l > ps_max-0.5)*(l < ps_max+0.5)
        # Find median bandpass values:
l_mean = [np.median(l[bold_band_min]), np.median(l[bold_band_max])]
s_mean = [np.median(s[bold_band_min]), np.median(s[bold_band_max])]
# Find linear relation:
coefs_mean = np.polyfit(l_mean, s_mean, 1)
poly_mean = np.poly1d(coefs_mean)
s_norm_mean = s/poly_mean(l)
#--------------------------------------------------------------
        if plot == 1:
pt.plot_continuum_norm_all(l, s, [l_point, l_peak, l_mean],[s_point, s_peak, s_mean], \
[s_norm_point, s_norm_peak, s_norm_mean], \
[poly_point, poly_peak, poly_mean], [self.K, self.H])
#--------------------------------------------------------------
self.l = l
self.s = [s_norm_peak, s_norm_point, s_norm_mean]
#--------------------------------------------------------------
return self.l, self.s
########################################################################################################
# S-INDEX #
########################################################################################################
def eshel_sindex(self, S, F, trace=None, order_width=None, l_orders=None, f_orders=None, \
l=None, s=None, plot=0):
"""
        This function estimates the chromospheric S index from the continuum-normalized spectrum, using
        several definitions of the H and K band fluxes, and propagates the flux uncertainties.
        ----------------------------
        INPUT          :
        ----------------------------
        S (2d array): Stellar image
        F (2d array): Flat image
        (the remaining parameters default to the results of the previous reduction steps)
        ----------------------------
        OUTPUT         :
        ----------------------------
        None (the S indices are stored and printed via 'results')
"""
#--------------------------
# CHECK PROGRAM PARAMETERS:
#--------------------------
        if trace is None: trace = self.trace
        if order_width is None: order_width = self.order_width
        if l_orders is None: l_orders = self.l_orders
        if f_orders is None: f_orders = self.f_orders
        if l is None: l = self.l
        if s is None: s = self.s
#------------------
# FIND UNCERTAINTY:
#------------------
        self.uncertainty(F, S, trace, order_width, l_orders, f_orders, l, s[0], plot=1)
#---------------------------------------------
# FIND FLUX AND UNCERTAINTY FOR EACH BANDPASS:
        #---------------------------------------------
result0 = self.find_bandpass_fluxes(l, s[0])
result1 = self.find_bandpass_fluxes(l, s[1])
result2 = self.find_bandpass_fluxes(l, s[2])
results = [result0, result1, result2]
#--------------
# FIND S INDEX:
#--------------
sindices = [self.sindex(results[i], l, save=i) for i in range(len(results))]
# Find fractional difference between each continuum method:
sindex_diff12 = abs(sindices[0] - sindices[1]) / sindices[0] * 100
sindex_diff13 = abs(sindices[0] - sindices[2]) / sindices[0] * 100
print(sindex_diff12)
print(sindex_diff13)
#--------------------------------------------------------------
        if plot == 1:
            # Stored as 'sindices' to avoid shadowing the 'sindex' method:
            self.sindices = sindices
            self.results()
#--------------------------------------------------------------
return
def uncertainty(self, F, S, trace, order_width, l_orders, f_orders, l, s, plot=0):
"""
        This utility estimates the mean flux uncertainty in a given bandpass using the S/N ratio.
"""
#-------------------------------------
        # S/N RATIO ALONG EACH ORDER BLAZE:
#-------------------------------------
# Calculate mean sky background from background subtracted image:
self.f_flux_sky, _ = self.mean_background(F, trace, plot=0)
self.s_flux_sky, _ = self.mean_background(S, trace, plot=0)
# Scale flat orders to that of the stellar orders:
f_flux_obj = f_orders
s_flux_obj = [f_flux_obj[i]/self.dif_max[i] for i in range(self.n_orders)]
# Convolve data for smoothing:
f_conv_obj = [self.convolve(f_flux_obj[i], 'mean', 10) for i in range(self.n_orders)]
s_conv_obj = [self.convolve(s_flux_obj[i], 'mean', 10) for i in range(self.n_orders)]
        # Find SNR along the orders:
        f_snr_orders = []
        s_snr_orders = []
        for i in range(self.n_orders):
            fsnr = [self.signal_to_noise(f_conv_obj[i][j], order_width, self.f_flux_sky) \
                    for j in range(self.len_disp)]
            ssnr = [self.signal_to_noise(s_conv_obj[i][j], order_width, self.s_flux_sky) \
                    for j in range(self.len_disp)]
            f_snr_orders.append(fsnr)
            s_snr_orders.append(ssnr)
# Find SNR peak maxima:
self.f_snr_max = [max(f_snr_orders[i]) for i in range(self.n_orders)]
self.s_snr_max = [max(s_snr_orders[i]) for i in range(self.n_orders)]
#----------------------------
# UNCERTAINTY FROM S/N RATIO:
#----------------------------
# Min and max indices for each bandpass:
l_min = [self.V-self.VR_bandpass, self.K-self.HK_bandpass, \
self.H-self.HK_bandpass, self.R-self.VR_bandpass]
l_max = [self.V+self.VR_bandpass, self.K+self.HK_bandpass, \
self.H+self.HK_bandpass, self.R+self.VR_bandpass]
# Order nr to loop over:
j = [0, 1, 1, 1]
# Find mean S/N and uncertainty in each bandpass:
self.f_snr_X = np.zeros(4)
self.s_snr_X = np.zeros(4)
self.sigma_f_snr = np.zeros(4)
self.sigma_s_snr = np.zeros(4)
self.std_f = np.zeros(4)
for i in range(4):
# Find min and max bandpass pixel indices:
X_pix_min = np.where(min(l_orders[j[i]], key=lambda x:abs(x-(l_min[i])))==l_orders[j[i]])[0][0]
X_pix_max = np.where(min(l_orders[j[i]], key=lambda x:abs(x-(l_max[i])))==l_orders[j[i]])[0][0]
# Find number of pixels used in bandpass:
n_pix_X = len(range(X_pix_min, X_pix_max))
# Estimate flux uncertainty from each bandpass:
f_snr_X = np.sum(f_snr_orders[j[i]][X_pix_min:X_pix_max]) / n_pix_X
s_snr_X = np.sum(s_snr_orders[j[i]][X_pix_min:X_pix_max]) / n_pix_X
# Estimate S/N and uncertainties:
self.sigma_f_snr[i] = 1/f_snr_X
self.sigma_s_snr[i] = 1/s_snr_X
self.f_snr_X[i] = f_snr_X
self.s_snr_X[i] = s_snr_X
# Estimate flat scatter:
f = f_orders[j[i]]/np.max(f_orders[j[i]])
f_std0 = self.convolve(f, 'std', 2)
f_std = self.convolve(f_std0, 'mean', 100)
self.std_f[i] = np.mean(f_std[X_pix_min:X_pix_max])
# plt.figure()
# plt.plot(l_orders[j[i]], f_std0, 'k-', linewidth=1.0, label=r'$\sigma_i$')
# plt.plot(l_orders[j[i]], f_std, 'r-', linewidth=1.2, label=r'$\sigma_i/\mu_i$')
# plt.show()
# Uncertainty internally from spectrum:
s_mea = self.convolve(s, 'mean', 2)
s_dif = s/s_mea - 1
s_std0 = self.convolve(s, 'std', 2)
s_std = self.convolve(s_std0, 'mean', 100)
#----------------------------
# ALL UNCERTAINTY CONSIDERED:
#----------------------------
# Shot Noise from flat blaze:
self.sigma_f = np.sum(self.std_f)
        # Three lines from order #57 and one from order #58 are used:
self.sigma_w = ( 1/4*self.sigma_w[0] + 3/4*self.sigma_w[1] )/2
# Find bandpass indices:
_, _, bands = self.find_bandpass_fluxes(l, s, plot)
V_indices, R_indices = bands[0], bands[1]
K1_indices, H1_indices = bands[2], bands[3]
Km_indices, Hm_indices = bands[4], bands[5]
K2_indices, H2_indices = bands[6], bands[7]
# Translate into flux uncertainty from each bandpass:
self.std_V = np.sum(s_std[V_indices]) / len(V_indices) # Continuum bands
self.std_R = np.sum(s_std[R_indices]) / len(R_indices)
self.std_K1 = np.sum(s_std[K1_indices]) / len(K1_indices) # Used for 1.09 Å square band fluxes
self.std_H1 = np.sum(s_std[H1_indices]) / len(H1_indices)
self.std_Km = np.sum(s_std[Km_indices]) / len(Km_indices) # Used for mean fluxes per wavelength
self.std_Hm = np.sum(s_std[Hm_indices]) / len(Hm_indices)
self.std_K2 = np.sum(s_std[K2_indices]) / len(K2_indices) # Used for triangular integrated fluxes
self.std_H2 = np.sum(s_std[H2_indices]) / len(H2_indices)
self.sigma_bands = [s_std[K2_indices], s_std[H2_indices]] # Used for triangular norm fluxes
# Combined uncertainties to be used for error propagation:
x = self.sigma_w + self.sigma_f
self.sigma_V = self.std_V + x
self.sigma_R = self.std_R + x
self.sigma_K1 = self.std_K1 + x; self.sigma_Km = self.std_Km + x; self.sigma_K2 = self.std_K2 + x
self.sigma_H1 = self.std_H1 + x; self.sigma_Hm = self.std_Hm + x; self.sigma_H2 = self.std_H2 + x
#--------------------------------------------------------------
        if plot == 1: pt.plot_sindex_scatter(l, s_dif, s_std0, s_std, self.bands)
#--------------------------------------------------------------
return s_std
def find_bandpass_fluxes(self, l, s, plot=0):
"""
        This utility simply finds the bandpass indices and fluxes given the bandpass widths.
"""
        # Shorthand parameters:
HK = self.HK_bandpass
VR = self.VR_bandpass
#------------------------------------------------
# FIND INITIAL INDICES NEEDED FOR ALL BANDPASSES:
#------------------------------------------------
# Find central bandpass index:
V_dex_cen = (np.abs(l-(self.V))).argmin()
K_dex_cen = (np.abs(l-(self.K))).argmin()
H_dex_cen = (np.abs(l-(self.H))).argmin()
R_dex_cen = (np.abs(l-(self.R))).argmin()
# Find wavelength indices for each bandpass:
V_dex = (np.abs(l-(self.V+VR/2))).argmin() - (np.abs(l-(self.V-VR/2))).argmin()
R_dex = (np.abs(l-(self.R+VR/2))).argmin() - (np.abs(l-(self.R-VR/2))).argmin()
# Find wavelength indices for 1.09 Å bandpass:
K1_dex = (np.abs(l-(self.K+HK/2))).argmin() - (np.abs(l-(self.K-HK/2))).argmin()
H1_dex = (np.abs(l-(self.H+HK/2))).argmin() - (np.abs(l-(self.H-HK/2))).argmin()
# Find wavelength indices for 2 x 1.09 Å lower widths:
K2_dex = (np.abs(l-(self.K+HK))).argmin() - (np.abs(l-(self.K-HK))).argmin()
H2_dex = (np.abs(l-(self.H+HK))).argmin() - (np.abs(l-(self.H-HK))).argmin()
        # Convert bandpass pixel widths to half-widths (rounding odd widths down):
for VR_dex_i in [V_dex, R_dex]:
if VR_dex_i % 2 == 0: VR_dex = int(VR_dex_i / 2)
else: VR_dex = int((VR_dex_i - 1) / 2)
for HK1_dex_i in [K1_dex, H1_dex]:
if HK1_dex_i % 2 == 0: HK1_dex = int(HK1_dex_i / 2)
else: HK1_dex = int((HK1_dex_i - 1) / 2)
for HK2_dex_i in [K2_dex, H2_dex]:
if HK2_dex_i % 2 == 0: HK2_dex = int(HK2_dex_i / 2)
else: HK2_dex = int((HK2_dex_i - 1) / 2)
# Find square bandpass indices:
V_indices = np.arange(V_dex_cen-VR_dex, V_dex_cen+VR_dex)
R_indices = np.arange(R_dex_cen-VR_dex, R_dex_cen+VR_dex)
        # Convert to full-range indices:
K1_indices = np.arange(K_dex_cen-HK1_dex, K_dex_cen+HK1_dex)
H1_indices = np.arange(H_dex_cen-HK1_dex, H_dex_cen+HK1_dex)
        # Convert to full-range indices:
K2_indices = np.arange(K_dex_cen-HK2_dex, K_dex_cen+HK2_dex+1)
H2_indices = np.arange(H_dex_cen-HK2_dex, H_dex_cen+HK2_dex+1)
#--------------------------------------
# DEFINE TRIANGULAR H AND K BANDPASSES:
#--------------------------------------
# Split out index ranges:
k1_indices = np.arange(K_dex_cen-HK2_dex, K_dex_cen)
k2_indices = np.arange(K_dex_cen, K_dex_cen+HK2_dex+1)
h1_indices = np.arange(H_dex_cen-HK2_dex, H_dex_cen)
h2_indices = np.arange(H_dex_cen, H_dex_cen+HK2_dex+1)
        # Compute linear relations on either side of the triangle:
coefs_k1 = np.polyfit([K_dex_cen-HK2_dex, K_dex_cen], [0, 1], 1); poly_k1 = np.poly1d(coefs_k1)
coefs_k2 = np.polyfit([K_dex_cen, K_dex_cen+HK2_dex], [1, 0], 1); poly_k2 = np.poly1d(coefs_k2)
coefs_h1 = np.polyfit([H_dex_cen-HK2_dex, H_dex_cen], [0, 1], 1); poly_h1 = np.poly1d(coefs_h1)
coefs_h2 = np.polyfit([H_dex_cen, H_dex_cen+HK2_dex], [1, 0], 1); poly_h2 = np.poly1d(coefs_h2)
# Find count values for each line:
s_tri_k1 = poly_k1(k1_indices)
s_tri_k2 = poly_k2(k2_indices)
s_tri_h1 = poly_h1(h1_indices)
s_tri_h2 = poly_h2(h2_indices)
# Combine triangular count values:
s_tri_K = np.append(s_tri_k1, s_tri_k2)
s_tri_H = np.append(s_tri_h1, s_tri_h2)
# Define finer regular grid:
        l_k1_grid = np.linspace(l[k1_indices[0]], l[k1_indices[-1]], int(1e4))
        l_k2_grid = np.linspace(l[k2_indices[0]], l[k2_indices[-1]], int(1e4))
        l_h1_grid = np.linspace(l[h1_indices[0]], l[h1_indices[-1]], int(1e4))
        l_h2_grid = np.linspace(l[h2_indices[0]], l[h2_indices[-1]], int(1e4))
# Interpolate triangular data function:
s_tri_k1_grid = scipy.interpolate.griddata(l[k1_indices], s_tri_k1, l_k1_grid, method='linear')
s_tri_k2_grid = scipy.interpolate.griddata(l[k2_indices], s_tri_k2, l_k2_grid, method='linear')
s_tri_h1_grid = scipy.interpolate.griddata(l[h1_indices], s_tri_h1, l_h1_grid, method='linear')
s_tri_h2_grid = scipy.interpolate.griddata(l[h2_indices], s_tri_h2, l_h2_grid, method='linear')
# Interpolate spectral data:
s_spc_k1_grid = scipy.interpolate.griddata(l[k1_indices], s[k1_indices], l_k1_grid, method='linear')
s_spc_k2_grid = scipy.interpolate.griddata(l[k2_indices], s[k2_indices], l_k2_grid, method='linear')
s_spc_h1_grid = scipy.interpolate.griddata(l[h1_indices], s[h1_indices], l_h1_grid, method='linear')
s_spc_h2_grid = scipy.interpolate.griddata(l[h2_indices], s[h2_indices], l_h2_grid, method='linear')
# Find wavelength value of intersection:
dex_k1_inter = np.where(np.abs(s_spc_k1_grid - s_tri_k1_grid) < 5e-4)[0][0]
dex_k2_inter = np.where(np.abs(s_spc_k2_grid - s_tri_k2_grid) < 5e-4)[0][-1]
dex_h1_inter = np.where(np.abs(s_spc_h1_grid - s_tri_h1_grid) < 5e-4)[0][0]
dex_h2_inter = np.where(np.abs(s_spc_h2_grid - s_tri_h2_grid) < 5e-4)[0][-1]
# Find coordinates of intersection points:
l_k1_inter, s_k1_inter = l_k1_grid[dex_k1_inter], s_tri_k1_grid[dex_k1_inter]
l_k2_inter, s_k2_inter = l_k2_grid[dex_k2_inter], s_tri_k2_grid[dex_k2_inter]
l_h1_inter, s_h1_inter = l_h1_grid[dex_h1_inter], s_tri_h1_grid[dex_h1_inter]
l_h2_inter, s_h2_inter = l_h2_grid[dex_h2_inter], s_tri_h2_grid[dex_h2_inter]
# Find closest wavelength index to intersection point:
dex_spc_k1_inter = (np.abs(l_k1_inter - l[k1_indices])).argmin()
dex_spc_k2_inter = (np.abs(l_k2_inter - l[k2_indices])).argmin()
dex_spc_h1_inter = (np.abs(l_h1_inter - l[h1_indices])).argmin()
dex_spc_h2_inter = (np.abs(l_h2_inter - l[h2_indices])).argmin()
#------------------
# FIND FLUX VALUES:
#------------------
# Find fluxes in each continuum passbands:
V_fluxes = s[V_indices]
R_fluxes = s[R_indices]
        # Find final count values for the rectangular 1.09 Å filter:
K1_fluxes = s[K1_indices]
H1_fluxes = s[H1_indices]
# Find final count values for mean filter:
Km_indices = np.arange(k1_indices[dex_spc_k1_inter], k2_indices[dex_spc_k2_inter])
Hm_indices = np.arange(h1_indices[dex_spc_h1_inter], h2_indices[dex_spc_h2_inter])
Km_fluxes = s[Km_indices]
Hm_fluxes = s[Hm_indices]
# Find final count values for triangular filter:
k1_fluxes = s_tri_k1[:dex_spc_k1_inter].tolist()
h1_fluxes = s_tri_h1[:dex_spc_h1_inter].tolist()
k2_fluxes = s_tri_k2[dex_spc_k2_inter:].tolist()
h2_fluxes = s_tri_h2[dex_spc_h2_inter:].tolist()
K2_fluxes = np.array(k1_fluxes + Km_fluxes.tolist() + k2_fluxes)
H2_fluxes = np.array(h1_fluxes + Hm_fluxes.tolist() + h2_fluxes)
# Construct coordinate array for precise polygon-area calculation:
Kp_wave = np.array([l[K2_indices[0]], l_k1_inter] + l[Km_indices].tolist() + \
[l_k2_inter, l[K2_indices[-1]] ])
Hp_wave = np.array([l[H2_indices[0]], l_h1_inter] + l[Hm_indices].tolist() + \
[l_h2_inter, l[H2_indices[-1]] ])
Kp_fluxes = np.array([0, s_k1_inter] + Km_fluxes.tolist() + [s_k2_inter, 0])
Hp_fluxes = np.array([0, s_h1_inter] + Hm_fluxes.tolist() + [s_h2_inter, 0])
Kp_coors = np.array([Kp_wave, Kp_fluxes])
Hp_coors = np.array([Hp_wave, Hp_fluxes])
# Combine to return:
band_fluxes = [V_fluxes, R_fluxes, K1_fluxes, H1_fluxes, Km_fluxes, Hm_fluxes, \
K2_fluxes, H2_fluxes, Kp_coors, Hp_coors]
tri_function = [s_tri_K, s_tri_H]
band_indices = [V_indices, R_indices, K1_indices, H1_indices, Km_indices, Hm_indices, \
K2_indices, H2_indices, K_dex_cen, H_dex_cen]
#--------------------------------------------------------------
        if plot == 1:
pt.plot_sindex_bands(l, s, s_tri_K, s_tri_H, K2_indices, H2_indices, K2_fluxes, H2_fluxes, \
l_k1_inter, l_k2_inter, l_h1_inter, l_h2_inter, \
s_k1_inter, s_k2_inter, s_h1_inter, s_h2_inter, \
Kp_wave, Hp_wave, Kp_fluxes, Hp_fluxes, Km_indices, Hm_indices, \
self.K, self.H, K1_indices, H1_indices)
pt.plot_sindex_fluxes(l, s, band_indices, band_fluxes, self.bands)
#--------------------------------------------------------------
return band_fluxes, tri_function, band_indices
def sindex(self, flux_results, l, save=None):
"""
        This utility is a general function to calculate the S index in several ways: from the standard
        1.09 Å rectangular band fluxes, from mean fluxes per wavelength, from triangular-weighted and
        triangular-integrated band fluxes, and from the enclosed polygon flux areas.
"""
# Unpack flux results:
bandfluxes, tri_func, bandindices = flux_results[0], flux_results[1], flux_results[2]
        # Find V and R bandpass fluxes:
val_V = np.sum(bandfluxes[0]); V = ufloat(val_V, val_V*self.sigma_V)
val_R = np.sum(bandfluxes[1]); R = ufloat(val_R, val_R*self.sigma_R)
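        # The classical Mount Wilson S index is S = alpha*8*(H+K)/(R+V), where the
        # factor 8 reflects the HKP-2 duty cycle and alpha ~ 2.4 calibrates to the
        # original HKP-1 scale (hedged; see e.g. Duncan et al. 1991). The variants
        # below differ only in how the H and K band fluxes are measured.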
#--------------------------------------------
# FIND S INDEX FROM 1.09 Å INTEGRATED FLUXES:
#--------------------------------------------
val_K1 = np.sum(bandfluxes[2]); K1 = ufloat(val_K1, val_K1*self.sigma_K1)
val_H1 = np.sum(bandfluxes[3]); H1 = ufloat(val_H1, val_H1*self.sigma_H1)
# Calculate S index:
        sindex_HK1 = 8 * (H1 + K1)/(R + V) * 2.4
#--------------------------------------------
# FIND S INDEX FROM MEAN FLUX PER WAVELENGTH:
#--------------------------------------------
val_Vm = np.mean(bandfluxes[0]); Vm = ufloat(val_Vm, val_Vm*self.sigma_V)
val_Rm = np.mean(bandfluxes[1]); Rm = ufloat(val_Rm, val_Rm*self.sigma_R)
val_Km = np.mean(bandfluxes[6]); Km = ufloat(val_Km, val_Km*self.sigma_Km)
val_Hm = np.mean(bandfluxes[7]); Hm = ufloat(val_Hm, val_Hm*self.sigma_Hm)
# val_Km = np.mean(bandfluxes[4]); Km = ufloat(val_Km, val_Km*self.sigma_Km)
# val_Hm = np.mean(bandfluxes[5]); Hm = ufloat(val_Hm, val_Hm*self.sigma_Hm)
# Calculate S index:
sindex_HKm = 8 * (Hm + Km)/(Rm + Vm) * self.HK_bandpass/self.VR_bandpass * 2.4
#------------------------------------------------------
        # FIND S INDEX FROM TRIANGULAR BANDPASS NORMALIZATION:
#------------------------------------------------------
val_Kn = np.sum(bandfluxes[6] * tri_func[0])
val_Hn = np.sum(bandfluxes[7] * tri_func[1])
sigma_Kn = np.sum(self.sigma_bands[0] * tri_func[0])
sigma_Hn = np.sum(self.sigma_bands[1] * tri_func[1])
Kn = ufloat(val_Kn, val_Kn*sigma_Kn)
Hn = ufloat(val_Hn, val_Hn*sigma_Hn)
# Calculate S index:
        sindex_HKn = 8 * (Hn + Kn)/(R + V) * 2.4
#------------------------------------------------
# FIND S INDEX FROM TRIANGULAR INTEGRATED FLUXES:
#------------------------------------------------
val_K2 = np.sum(bandfluxes[6]); K2 = ufloat(val_K2, val_K2*self.sigma_K2)
val_H2 = np.sum(bandfluxes[7]); H2 = ufloat(val_H2, val_H2*self.sigma_H2)
# Calculate S index:
sindex_HK2 = 8 * (H2 + K2)/(R + V)
#----------------------------------------------
        # FIND S INDEX FROM ENCLOSED POLYGON FLUX AREA:
#----------------------------------------------
# Unpack, reverse, add starting point:
l_V = l[bandindices[0]].tolist(); lr_V = l_V[::-1]; lr_V = [l_V[0]] + [l_V[-1]] + lr_V
l_R = l[bandindices[1]].tolist(); lr_R = l_R[::-1]; lr_R = [l_R[0]] + [l_R[-1]] + lr_R
l_K = bandfluxes[8][0].tolist(); lr_K = l_K[::-1]; lr_K = [l_K[0]] + lr_K
l_H = bandfluxes[9][0].tolist(); lr_H = l_H[::-1]; lr_H = [l_H[0]] + lr_H
s_V = bandfluxes[0].tolist(); sr_V = s_V[::-1]; sr_V = [0] + [0] + sr_V
s_R = bandfluxes[1].tolist(); sr_R = s_R[::-1]; sr_R = [0] + [0] + sr_R
s_K = bandfluxes[8][1].tolist(); sr_K = s_K[::-1]; sr_K = [0] + sr_K
s_H = bandfluxes[9][1].tolist(); sr_H = s_H[::-1]; sr_H = [0] + sr_H
        # Compute enclosed polygon areas from the (wavelength, flux) outlines:
val_Vp = self.polygon_area(np.array([lr_V, sr_V]).T)
val_Kp = self.polygon_area(np.array([lr_K, sr_K]).T)
val_Hp = self.polygon_area(np.array([lr_H, sr_H]).T)
val_Rp = self.polygon_area(np.array([lr_R, sr_R]).T)
# plt.figure()
# plt.plot(lr_V, sr_V, 'b-')
# plt.plot(lr_V, sr_V, 'r*')
# plt.show()
# Combine with uncertainty:
Vp = ufloat(val_Vp, val_Vp*self.sigma_V)
Kp = ufloat(val_Kp, val_Kp*self.sigma_K2)
Hp = ufloat(val_Hp, val_Hp*self.sigma_H2)
Rp = ufloat(val_Rp, val_Rp*self.sigma_R)
# Calculate S index:
sindex_HKp = 8 * (Hp + Kp)/(Rp + Vp)
#--------------------------------------------------------------
        if save == 0:
self.s1 = ['1:', val_V, val_K1, val_H1, val_R, sindex_HK1]
self.sn = ['n:', val_V, val_Kn, val_Hn, val_R, sindex_HKn]
self.sm = ['m:', val_Vm, val_Km, val_Hm, val_Rm, sindex_HKm]
self.s2 = ['2:', val_V, val_K2, val_H2, val_R, sindex_HK2]
self.sp = ['p:', val_Vp, val_Kp, val_Hp, val_Rp, sindex_HKp]
return np.array([sindex_HK1, sindex_HKn, sindex_HKm, sindex_HK2, sindex_HKp])
def polygon_area(self, xy, plot=0):
"""
        This utility takes coordinates (x, y) ordered in an array and calculates the enclosed polygon
        area. The coordinates need to be ordered in a counter-clockwise manner, since the circumference
        is integrated using Green's theorem (the shoelace formula) to evaluate the polygon area.
"""
l = len(xy)
s = 0.0
for i in range(l):
j = (i+1)%l # keep index in [0,l)
s += (xy[j,0] - xy[i,0])*(xy[j,1] + xy[i,1])
#--------------------------------------------------------------
return -0.5*s
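    # Example (hedged): a unit square traversed counter-clockwise encloses area 1:
    #   square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    #   area = self.polygon_area(square)   # -> 1.0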
def results(self):
print('################################################')
print(' {} - {} '.format(self.target, self.date))
print('################################################')
head_SF = self.hdul[self.SF_dex[0]][0].header
head_FF = self.hdul[self.FF_dex[0]][0].header
print('Magnitude = {}, Seeing = {}'.format(self.magnitude, self.seeing))
        print('Exptime star: t = {} s'.format(head_SF['EXPTIME']))
        print('Exptime flat: t = {} s'.format(head_FF['EXPTIME']))
print('------------------------------------------------')
print(' CCD NOISE PROPERTIES ')
print('------------------------------------------------')
BF_mean, BF_std = np.mean(self.BF), np.std(self.BF)
DF_mean, DF_std = np.mean(self.DF), np.std(self.DF)
print('Bias master : mean = {:.4g}, std = {:.4g}'.format(BF_mean, BF_std))
print('Dark current: mean = {:.4g}, std = {:.4g}'.format(DF_mean, DF_std))
print('GAIN = {:.3g} e-/ADU'.format(self.gain))
print('RON = {:.3g} ADU'.format(BF_std))
print('VAR = {:.3g} ADU (=<RON^2>)'.format(BF_std**2))
print('------------------------------------------------')
print(' BACKGROUND SKY & SCATTER ')
print('------------------------------------------------')
print('Flat mean background counts: {:.1f}'.format(self.f_flux_sky))
print('Star mean background counts: {:.1f}'.format(self.s_flux_sky))
print('------------------------------------------------')
print(' RV CORRECTION ')
print('------------------------------------------------')
print('Barycentric RV correction: {:.2f} km/s'.format(self.delta_v_baryc))
print('Star motion RV Correction: {:.2f} km/s'.format(self.rv_amp))
print('Correction in velocity : {:.2f} km/s'.format(self.delta_v))
print('Correction in wavelength : {:.2f} Å'.format(self.delta_l))
print('Correction in pixelspace : {:.2f}'.format(self.delta_p))
print('------------------------------------------------')
print(' SNR & UNCERTAINTIES ')
print('------------------------------------------------')
f_snr_max, s_snr_max = self.f_snr_max, self.s_snr_max
print('S/N in order #57: {:.1f} (flat), {:.1f} (star)'.format(f_snr_max[1], s_snr_max[1]))
print('S/N in order #58: {:.1f} (flat), {:.1f} (star)'.format(f_snr_max[0], s_snr_max[0]))
print('------------------------------------------------')
snx = [self.s_snr_X[0], self.s_snr_X[1], self.s_snr_X[2], self.s_snr_X[3]]
snr =[self.sigma_s_snr[0]*100,self.sigma_s_snr[1]*100,self.sigma_s_snr[2]*100,self.sigma_s_snr[3]*100]
std = [self.sigma_V*100, self.sigma_K1*100, self.sigma_H1*100, self.sigma_R*100]
print('Bandpass : V K H R | Total')
print('S/N : {:.3g} {:.3g} {:.3g} {:.3g} |'.format(snx[0], snx[1], snx[2], snx[3]))
print('sigma(S/N): {:.3g}% {:.3g}% {:.3g}% {:.3g}% | {:.1f}%'.format(snr[0], snr[1], snr[2],\
snr[3], np.sum(snr)))
print('sigma(std): {:.3g}% {:.3g}% {:.3g}% {:.3g}% | {:.1f}%'.format(std[0], std[1], std[2],\
std[3], np.sum(std)))
print('sigma(wav): | {:.2f}%'.format(self.sigma_w*100))
print('sigma(fla): | {:.2f}%'.format(self.sigma_f*100))
print('------------------------------------------------')
print(' S INDEX ')
print('------------------------------------------------')
print(self.s1)
print(self.sn)
print(self.sm)
print(self.s2)
print(self.sp)
print('------------------------------------------------')
########################################################################################################
# OPTIMAL WIDTHS #
########################################################################################################
def find_optimal_width(self, image=None, trace=None, plot=0):
"""
This utility takes most preferably a reduced flat image and the polynomial describtion traced,
and first cut out a bandpass defined by disp_lenght and cross_width. Looping through increasing
spatials widths the S/N ratio is found for each, and the spatial width asigned to the highest
S/N ratio is optimal for linear extraction. To return the results in terms of FWHM a Gauss function
is fitted to the spatial width of maximum flux.
"""
        # Check if 'image' and 'trace' are defined:
        if image is None: image = self.F_calib
        if trace is None: trace = self.trace
# Cut out order:
widths = np.arange(1, 40)
order = self.cut_out_order(image, np.polyval(trace['order_2'], self.disp), widths[-1])
# Find maximum of blaze function:
blaze = order.sum(axis=1)
blaze_max = np.max(blaze)
index_max = np.nanargmax(blaze)
# Find mean sky background along disp direction used for S/N ratio:
flux_inter, _ = self.mean_background(image, trace, plot=0)
        # Loop over spatial widths:
        snr = np.zeros(len(widths))
        for w in widths:
            # Sum the flux in a band of 2*w pixels centered on the order
            # (the order lies at the central column of the cut, widths[-1]//2):
            center = int(widths[-1]/2)
            order_w = order[index_max, max(center - w, 0):center + w]
            flux_order = np.sum(order_w)
            snr[w-1] = self.signal_to_noise(flux_order, len(order_w), flux_inter)
# Find highest S/N ratio optimal order width:
index_max_snr = np.argmax(snr)
optimal_order_width = widths[index_max_snr]
# Find residual inter-order width:
order_distance = int(((self.ref_cen_pos[1] - self.ref_cen_pos[2]) + \
(self.ref_cen_pos[2] - self.ref_cen_pos[3]))/2)
#optimal_inter_order_width = int(order_distance - 2.5*optimal_order_width)
#--------------------------------------------------------------
        if plot == 1:
pt.plot_optimal_width(widths, order, blaze_max, index_max, flux_inter, snr, optimal_order_width)
#--------------------------------------------------------------
self.order_width = optimal_order_width
#--------------------------------------------------------------
return self.order_width
def mean_background(self, image, trace, plot=0):
"""
        This utility uses 'trace' and 'cut_out_order' to select the pixel sky background in a bandpass on
        both sides of the order of interest. In the spatial direction the median pixel value is found on
        each side, and lastly the mean of the two sides is computed. Returned is a 1D spectrum describing
        the background (e.g. used by the 'signal_to_noise' utility).
"""
# Find midpoint of inter orders:
midpoint_below = (self.ref_cen_pos[1] - self.ref_cen_pos[2])/2
midpoint_above = (self.ref_cen_pos[2] - self.ref_cen_pos[3])/2
# Move fit to the midpoint of inter orders:
yfit_below = np.polyval(trace['order_1'], self.disp) + np.ones(len(self.disp))*midpoint_below
yfit_above = np.polyval(trace['order_2'], self.disp) + np.ones(len(self.disp))*midpoint_above
yfit_order = np.polyval(trace['order_2'], self.disp) + np.ones(len(self.disp))
# Set cross width for background cut to half the distance between orders:
# (here the position of the order is a limitation)
cross_order_width = math.floor(yfit_below[0])*2 - 1
# (else if order are moved up use)
#cross_order_width = int((self.ref_cen_pos[1] - self.ref_cen_pos[2])[0]/2 - 1)
# Cut out stellar background on both sides:
back_below = self.cut_out_order(image, yfit_below, cross_order_width)
back_above = self.cut_out_order(image, yfit_above, cross_order_width)
# Sum order to 1D spectrum and mean them:
l_sky = (np.median(back_below, axis=1) + np.median(back_above, axis=1))/2.
flux_sky_mean = abs(l_sky.mean())
#-----------------------------------------------------------:
if plot is 1: pt.plot_sky_background(image, self.disp, yfit_below, yfit_above, yfit_order, l_sky)
#--------------------------------------------------------------
return flux_sky_mean, l_sky
def signal_to_noise(self, flux_star, n_pix_star, flux_sky):
"""
This function calculates the S/N ratio using the 1D spectrum of the object and sky-background.
Purely by statistics with and increasing number of pixel used to define the object 'n_pix_object',
the S/N ratio will decrease. The noise sources describing a CCD are the 'gain' (e-/ADU) and 'ron',
read-out-noise (e-).
"""
# See Schroeder (1999) p. 317 or Bradt (2004) p. 163:
signal = flux_star*self.gain
noise = np.sqrt(flux_star*self.gain + flux_sky*self.gain*n_pix_star + self.ron*n_pix_star)
#--------------------------------------------------------------
return signal / noise
########################################################################################################
# GENERAL UTILITIES SPECIALIZED TO THIS SOFTWARE #
########################################################################################################
def blue_moves(self, path, plot=0):
"""
This routine measures the drift of the spectrum over time by using ThAr lines in the same order
as the Ca II H & K lines. (Fun fact: the software name comes from 'Blue Moves' which is the eleventh
studio album release by <NAME>, released in October 1976.
"""
# Load all files from same folder:
img_files = np.sort(glob.glob('{}{}*'.format(path, self.img_name)))
hdu = np.array([fits.open(str(files)) for files in img_files])
n = len(img_files)
# Find time scaling to utc time and Julian Date
time = [hdu[i][0].header['JD-DATE'] for i in range(n)]
# Loop through all ThAr images:
move_x = np.zeros(n)
move_y = np.zeros(n)
sigma_x = np.zeros(n-1)
sigma_y = np.zeros(n-1)
for i in range(n):
# Open and close one image at a time:
with fits.open(str(img_files[i])) as hdu_i:
# Select focused spectral region:
T_i = hdu_i[0].data[300:480, 420:2270].T
# UTILITY CALL: Locate coordinates of lines:
COF_i, _, _ = self.peak_finder(T_i, sigma=5, plot=0)
# UTILITY CALL: Remove lines too close to borders:
COF_i, N_lines = self.image_border(T_i, COF_i)
# UTILITY CALL: Only use same lines each time:
if i==0:
#COF_0, _, _ = self.peak_finder(T_i, sigma=5, plot=0)
COF_0 = COF_i
if i is not 0:
indices0, indices1 = self.match_coordinates(COF_0, COF_i, threshold=5, plot=1)
# Find scatter of the drift for each line:
if i > 1:
diff_x = COF_i[indices1,0] - x
diff_y = COF_i[indices1,1] - y
sigma_x[i-1] = np.std(diff_x)
sigma_y[i-1] = np.std(diff_y)
# Find coordinates (x and y needs to be after if < 1 statement):
x = COF_i[indices1,0]
y = COF_i[indices1,1]
move_x[i] = x.mean()
move_y[i] = y.mean()
# Print to bash:
pt.compilation(i, n, 'Blue Moves')
print
# Convert to relative changes:
move_x = move_x[1::] - move_x[1::].mean()
move_y = move_y[1::] - move_y[1::].mean()
time = time[1::]
#-----------------------------------------------------------
if plot is 1:
np.savetxt('{}bluemoves.txt'.format(self.path), np.vstack([time, move_y, sigma_y]).T)
pt.plot_rv_stability(time, move_y, sigma_y)
#-----------------------------------------------------------
return
def image_border(self, image, pixel_coor, border_edge=20):
"""
This utility takes an array of pixel coordinates and finds coordinates that is closer than 20 pixels
to the image 'border_edge'. These coordinates are then removed from the array and a new array,
'new_pixel_coor', is returned together with the new (lower) number of coordinates 'N_coor'.
"""
# Unpack pixel coordinates:
x = pixel_coor[:,0]
y = pixel_coor[:,1]
# Check if stellar coordinates are too close to borders:
i_x1 = np.where(x < border_edge)[0]
i_y1 = np.where(y < border_edge)[0]
i_x2 = np.where(x > np.shape(image)[0]-border_edge)[0]
i_y2 = np.where(y > np.shape(image)[1]-border_edge)[0]
i_xy = np.hstack([i_x1, i_x2, i_y1, i_y2])
# Discard these coordinates:
x_new = np.delete(x, i_xy)
y_new = np.delete(y, i_xy)
N_coor = len(x)
#-----------------------------------------------------------
return np.array([x_new, y_new]).T, N_coor
########################################################################################################
# GENERAL STRUCTURAL ALGORITHMS #
########################################################################################################
def peak_finder(self, pixel_array, min_pix=7, sigma=1, plot=0):
"""
This utility takes a pixel array and use the 'scipy.ndimage' package to find local maxima within an
image. These are determined upon the number of standard deviations, 'sigma', and a minimum of pixels,
'min_pix', a structure should be considered. From the returned structure the same package determines
the Center Of Flux ('COF') in coordinate space (x, y), and the circular 'radius' for each, together
with the number of local maximum structures, 'N_struct', detected within the pixel_array.
"""
# FIND CENTER OF FLUX FOR STARS ABOVE THRESHOLD:
# Define threshold as a number of standard deviations above the mean:
threshold = np.mean(pixel_array) + sigma*np.std(pixel_array)
# Find all pixels above the threshold:
above_threshold = np.where(pixel_array > threshold, 1, 0)
# Label the structures (where starmap = 1 that are adjacent to others):
labels, N_structs = scipy.ndimage.label(above_threshold, structure = np.ones((3,3)))
# Sum the number of elements in each structure:
sums = scipy.ndimage.sum(above_threshold, labels, range(1,N_structs+1))
# Choose only structures with more than min_pix elements (+1 for index mismatch):
structs = np.where(sums > min_pix)[0] + 1
# Define starmap as 0 where there are no stars and 1 where there are stars:
struct_map = np.zeros(np.shape(pixel_array))
for struct in structs: struct_map = struct_map + np.where(labels == struct, 1, 0)
# Label all the structures again:
labels, N_structs = scipy.ndimage.label(struct_map, structure = np.ones((3,3)))
# Find the center of flux of all the structures found above threshold:
COF = scipy.ndimage.center_of_mass(pixel_array, labels, range(1, N_structs+1))
# Estimate the radius of the structures in pixels:
radius = np.sqrt(sums[structs-1]/np.pi)
# From tuple to array:
COF = np.asarray(COF)
#--------------------------------------------------------------
if plot is 1: # NEEDS ACTIVATION FROM SOURCE
plt.figure()
plt.imshow(pt.linear(pixel_array.T), cmap='Blues', origin='lower')
plt.scatter(COF[:,0], COF[:,1], s=radius*12, facecolors='none', edgecolors='r', marker='s')
plt.show()
#--------------------------------------------------------------
return COF, radius, N_structs
def find_peak_in_noise(self, s, peak_coor, plot=0):
"""
This utility identifies the
"""
# Define limits for peak search:
limits = [int(peak_coor-25), int(peak_coor+25)]
# Different conditions:
conv = self.res_power * 1e-3 # Smooth-filter scale linear with resolving power
width = self.len_disp/10 # Width scales likewise with the pixel scale
# Find all peaks:
peaks_all_dex, _ = scipy.signal.find_peaks(s)
peaks_all_val = s[peaks_all_dex]
# Find all approx peaks from convolved spectrum:
s_conv = self.convolve(s, 'median', int(conv))
s_conv = self.convolve(s_conv, 'mean', int(conv))
peaks_conv_dex, _ = scipy.signal.find_peaks(s_conv)
peaks_conv_val = s_conv[peaks_conv_dex]
# Select peaks inside disp limits range:
ndarray = (peaks_conv_dex > limits[0]) * (peaks_conv_dex < limits[1])
peaks_limit_dex = peaks_conv_dex[ndarray]
peaks_limit_val = peaks_conv_val[ndarray]
# Find x highest peaks from convolved spectrum:
peak_conv_dex = heapq.nlargest(1, np.arange(len(peaks_limit_val)), key=peaks_limit_val.__getitem__)[0]
peak_conv_pix = peaks_limit_dex[peak_conv_dex]
# Make bold array around peak with dobbelt the width of conv:
peak_dex = (peaks_all_dex>peak_conv_pix-conv)*(peaks_all_dex<peak_conv_pix+conv)
# Select peak:
peak = peaks_all_dex[peak_dex][np.argmax(peaks_all_val[peak_dex])]
#--------------------------------------------------------------
if plot is 1:
pt.plot_arc_peak(s, s_conv, peaks_limit_dex, peaks_limit_val, peaks_all_dex, \
peaks_all_val, peak, limits)
#--------------------------------------------------------------
return peak
def convolve(self, data0, filtertype, n):
"""
This function can be used to correct for slow trends using e.g. a "moving mean" filter. The utility
takes the flatten data, a string with the desired filter, and n number of points is should smooth
the data with. Compared to the bottleneck package this function do not leave a offset.
"""
# Constants:
data = data0.copy() # Avoid overwritting data:
data_new = np.zeros(len(data)) # To pick up new data
nzero = np.zeros(2*n+1) # optimization constant
# Available filters:
if filtertype=='mean': moving_filter = np.mean
if filtertype=='median': moving_filter = np.median
if filtertype=='sum': moving_filter = np.sum
if filtertype=='std': moving_filter = np.std
# Interval: d[n, 1+n, ... , N-1, N-n]
for i in range(len(data)-2*n):
data_new[n+i] = moving_filter(data[range((n+i)-n, (n+i)+n+1)])
for i in range(n):
# Interval: d[-n, -(n-1), ... , n-1, n] - Low end of data
low = nzero
low[range(n-i)] = data[0]*np.ones(n-i)
low[-(n+1+i):] = data[range(0, n+1+i)]
data_new[i] = moving_filter(low)
# Interval: d[N-n, N-(n-1), ... , N+(n-1), N+n] - High end of data
high = nzero
high[range(n+1+i)] = data[range(len(data)-(n+i+1), len(data))]
high[-(n-i):] = data[-1]*np.ones(n-i)
data_new[len(data)-1-i] = moving_filter(high)
#--------------------------------------------------------------
return data_new
def match_coordinates(self, array1, array2, threshold=10, plot=0):
"""
This function match two set of coordinates. This is done by a purely geometrical technique and
looking at the histogram. It finds the minimum distance from i'th array1 star to every other array2
star. Here indices is the rows with all the indices of matching. To select only common coordinates,
then use 'indices2'.
"""
# Placeholders:
value_min = np.zeros(len(array1))
index_min = {}
# FIND MINIMUM DISTANCE WITH PYTHAGOREAN GEOMETRY:
for i in range(len(array1)):
d = | np.sqrt( (array2[:,0]-array1[i,0])**2 + (array2[:,1]-array1[i,1])**2 ) | numpy.sqrt |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
# from memory_profiler import profile
import os
from toast_planck.reproc_modules.destripe_tools import (fast_hit_binning,
fast_binning)
import scipy.signal
from toast import qarray as qa
from toast.mpi import MPI
from toast_planck.preproc_modules import MapSampler, flagged_running_average
import astropy.io.fits as pf
import numpy as np
import toast.fod as tf
import toast.timing as timing
class OpNoiseEstim():
def __init__(
self, signal=None, flags=None, detmask=1, commonmask=3, out=None,
maskfile=None, mapfile=None, rimo=None, pol=True, nbin_psd=1000,
lagmax=100000, stationary_period=86400., nosingle=False,
no_spin_harmonics=False, calibrate_signal_estimate=False,
nsum=10, naverage=100):
self._signal = signal
self._flags = flags
self._detmask = detmask
self._commonmask = commonmask
self._out = out
self._maskfile = maskfile
self._mapfile = mapfile
if rimo is None:
raise RuntimeError('OpNoiseEstim: You must provide a RIMO')
self._rimo = rimo
self._pol = pol
self._nbin_psd = nbin_psd
self._lagmax = lagmax
self._stationary_period = stationary_period
self._nosingle = nosingle
self._no_spin_harmonics = no_spin_harmonics
self._calibrate_signal_estimate = calibrate_signal_estimate
# Parameters for downsampling the data
self._nsum = nsum
self._naverage = naverage
def exec(self, data):
cworld = data.comm.comm_world
rank = cworld.Get_rank()
masksampler = None
if self._maskfile:
masksampler = MapSampler(self._maskfile, comm=cworld)
mapsampler = None
if self._mapfile:
mapsampler = MapSampler(self._mapfile, comm=cworld, pol=True)
for obs in data.obs:
tod = obs['tod']
local_intervals = tod.local_intervals(obs['intervals'])
dets = tod.local_dets
ndet = len(dets)
timestamps = tod.local_timestamps()
commonflags = tod.local_common_flags()
commonflags = (commonflags & self._commonmask != 0)
fsample = self.subtract_signal(
tod, cworld, rank, masksampler, mapsampler, local_intervals)
# Extend the gap between intervals to prevent sample pairs
# that cross the gap.
intervals = obs['intervals']
gap_min = np.int(self._lagmax) + 1
# Downsampled data requires longer gaps
gap_min_nsum = np.int(self._lagmax * self._nsum) + 1
offset, nsamp = tod.local_samples
gapflags = np.zeros_like(commonflags)
gapflags_nsum = np.zeros_like(commonflags)
for ival1, ival2 in zip(intervals[:-1], intervals[1:]):
gap_start = ival1.last + 1
gap_stop = max(gap_start + gap_min, ival2.first)
gap_stop_nsum = max(gap_start + gap_min_nsum, ival2.first)
if gap_start < offset + nsamp and gap_stop > offset:
gap_start = max(0, gap_start - offset)
gap_stop = min(offset + nsamp, gap_stop - offset)
gapflags[gap_start:gap_stop] = True
gap_stop_nsum = min(offset + nsamp, gap_stop_nsum - offset)
gapflags_nsum[gap_start:gap_stop_nsum] = True
for idet1 in range(ndet):
for idet2 in range(idet1, ndet):
det1 = dets[idet1]
det2 = dets[idet2]
if det1 == det2 and self._nosingle:
continue
signal1 = tod.local_signal(det1)
flags1 = tod.local_flags(det1, name=self._flags)
flags = (flags1 & self._detmask != 0)
signal2 = None
flags2 = None
if det1 != det2:
signal2 = tod.local_signal(det2)
flags2 = tod.local_flags(det2, name=self._flags)
flags[flags2 & self._detmask != 0] = True
flags[commonflags] = True
self.process_noise_estimate(
signal1, signal2, flags, gapflags, gapflags_nsum,
timestamps, fsample, cworld, rank, 'noise', det1, det2,
local_intervals)
return
def subtract_signal(self, tod, cworld, rank, masksampler, mapsampler,
local_intervals):
""" Subtract a signal estimate from the TOD and update the
flags for noise estimation.
"""
start_signal_subtract = MPI.Wtime()
for det in tod.local_dets:
if rank == 0:
print('Subtracting signal for {}'.format(det), flush=True)
tod.cache.report()
fsample = self._rimo[det].fsample
epsilon = self._rimo[det].epsilon
eta = (1 - epsilon) / (1 + epsilon)
signal = tod.local_signal(det, name=self._signal)
flags = tod.local_flags(det, name=self._flags)
flags &= self._detmask
for ival in local_intervals:
ind = slice(ival.first, ival.last + 1)
sig = signal[ind]
flg = flags[ind]
quat = tod.local_pointing(det)[ind]
if self._pol:
theta, phi, psi = qa.to_angles(quat)
iw = np.ones_like(theta)
qw = eta * np.cos(2 * psi)
uw = eta * np.sin(2 * psi)
iquw = np.column_stack([iw, qw, uw])
else:
theta, phi = qa.to_position(quat)
if masksampler is not None:
maskflg = masksampler.at(theta, phi) < 0.5
flg[maskflg] |= 255
if mapsampler is not None:
if self._pol:
bg = mapsampler.atpol(theta, phi, iquw)
else:
bg = mapsampler.at(theta, phi)
if self._calibrate_signal_estimate:
good = flg == 0
ngood = np.sum(good)
if ngood > 1:
templates = np.vstack([np.ones(ngood), bg[good]])
invcov = np.dot(templates, templates.T)
cov = np.linalg.inv(invcov)
proj = np.dot(templates, sig[good])
coeff = np.dot(cov, proj)
bg = coeff[0] + coeff[1] * bg
sig -= bg
cworld.barrier()
stop_signal_subtract = MPI.Wtime()
if rank == 0:
print('TOD signal-subtracted in {:.2f} s'.format(
stop_signal_subtract - start_signal_subtract),
flush=True)
return fsample
def decimate(self, x, flg, gapflg, local_intervals):
# Low-pass filter with running average, then downsample
xx = x.copy()
flags = flg.copy()
for ival in local_intervals:
ind = slice(ival.first, ival.last + 1)
xx[ind], flags[ind] = flagged_running_average(
x[ind], flg[ind], self._naverage,
return_flags=True)
return xx[::self._nsum].copy(), (flags + gapflg)[::self._nsum].copy()
"""
def highpass(self, x, flg):
# Flagged real-space high pass filter
xx = x.copy()
j = 0
while j < x.size and flg[j]: j += 1
alpha = .999
for i in range(j+1, x.size):
if flg[i]:
xx[i] = x[j]
else:
xx[i] = alpha*(xx[j] + x[i] - x[j])
j = i
xx /= alpha
return xx
"""
def log_bin(self, freq, nbin=100, fmin=None, fmax=None):
if np.any(freq == 0):
raise Exception('Logarithmic binning should not include '
'zero frequency')
if fmin is None:
fmin = np.amin(freq)
if fmax is None:
fmax = np.amax(freq)
bins = np.logspace(np.log(fmin), np.log(fmax), num=nbin + 1,
endpoint=True, base=np.e)
bins[-1] *= 1.01 # Widen the last bin not to have a bin with one entry
locs = np.digitize(freq, bins).astype(np.int32)
hits = np.zeros(nbin + 2, dtype=np.int32)
fast_hit_binning(locs, hits)
return locs, hits
def bin_psds(self, my_psds, fmin=None, fmax=None):
my_binned_psds = []
my_times = []
binfreq0 = None
for i in range(len(my_psds)):
t0, _, freq, psd = my_psds[i]
good = freq != 0
if self._no_spin_harmonics:
# Discard the bins containing spin harmonics and their
# immediate neighbors
for i0 in range(1, 3):
f0 = i0 / 60.
for i in range(1, 30):
fmask = f0 * i
imin = np.argmin(np.abs(freq - fmask))
if i == 1:
# The first bin has a wider neighborhood
good[imin - 2:imin + 3] = False
else:
good[imin - 1:imin + 2] = False
if self._nbin_psd is not None:
locs, hits = self.log_bin(freq[good], nbin=self._nbin_psd,
fmin=fmin, fmax=fmax)
binfreq = np.zeros(hits.size)
fast_binning(freq[good], locs, binfreq)
binfreq = binfreq[hits != 0] / hits[hits != 0]
else:
binfreq = freq
hits = np.ones(len(binfreq))
if binfreq0 is None:
binfreq0 = binfreq
else:
if np.any(binfreq != binfreq0):
raise Exception('Binned PSD frequencies change')
if self._nbin_psd is not None:
binpsd = np.zeros(hits.size)
fast_binning(psd[good], locs, binpsd)
binpsd = binpsd[hits != 0] / hits[hits != 0]
else:
binpsd = psd
my_times.append(t0)
my_binned_psds.append(binpsd)
return my_binned_psds, my_times, binfreq0
def discard_spin_harmonics(self, binfreq, all_psds):
ind = binfreq != 0
for i0 in range(1, 3):
f0 = i0 / 60.
for i in range(1, 10):
fmask = f0 * i
imin = np.argmin(np.abs(binfreq - fmask))
if i == 1:
ind[imin - 1:imin + 2] = False
else:
ind[imin] = False
binfreq = binfreq[ind]
all_psds = all_psds[:, ind]
return binfreq, all_psds
def discard_outliers(self, binfreq, all_psds, all_times):
all_psds = list(all_psds)
all_times = list(all_times)
nrow, ncol = np.shape(all_psds)
# Discard empty PSDs
i = 1
nbad = 0
all_psds = list(all_psds)
all_times = list(all_times)
while i < nrow:
p = all_psds[i]
if np.all(p == 0) or np.any(np.isnan(p)):
del all_psds[i]
del all_times[i]
nrow -= 1
nbad += 1
else:
i += 1
if nbad > 0:
print('Discarded {} empty or NaN psds'.format(nbad), flush=True)
# Throw away outlier PSDs by comparing the PSDs in specific bins
if nrow > 10:
all_good = np.isfinite(np.sum(all_psds, 1))
for col in range(ncol - 1):
if binfreq[col] < .001:
continue
# Local outliers
psdvalues = np.array([x[col] for x in all_psds])
smooth_values = scipy.signal.medfilt(psdvalues, 11)
good = np.ones(psdvalues.size, dtype=np.bool)
good[psdvalues == 0] = False
for i in range(10):
# Local test
diff = np.zeros(psdvalues.size)
diff[good] = np.log(psdvalues[good]) - \
np.log(smooth_values[good])
sdev = | np.std(diff[good]) | numpy.std |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import nibabel as nib
from dltk.io.preprocessing import whitening,normalise_zero_one
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import matplotlib.pyplot as plt
import numpy as np
import fnmatch
import AD_Constants as constants
import requests
import sys
from imgaug import augmenters as iaa
from numpy import random
import time
import re
import tensorflow as tf
from tensorflow.python.keras.api.keras.preprocessing.image import ImageDataGenerator,img_to_array, load_img,array_to_img
class Dataset_Import():
def __init__(self):
self.main_directory=constants.main_image_directory
self.training_part=constants.training_frac
self.i = 0
self.auto_shuffling_state=False
self.trainer_shuffling_state = False
self.train_ad_fnames = None
self.train_mci_fnames = None
self.train_nc_fnames = None
self.source=constants.source
self.target=constants.target
self.img_shape_tuple=constants.img_shape_tuple
self.img_channel=constants.img_channel
self.shu_control_state=False
self.data_augmentation=constants.augment_data
self.image_group=constants.chosen_epi_format
self.strict_match=constants.strict_match
self.pic_index =constants.pic_index
self.train_dir =constants.train_dir
self.validation_dir =constants.validation_dir
self.set_checker=0
self.classify_group=constants.classify_group
# Directory with our training AD dataset
self.train_ad_dir =constants.train_ad_dir
# Directory with our training MCI dataset
self.train_mci_dir = constants.train_mci_dir
# Directory with our training NC dataset
self.train_nc_dir =constants.train_nc_dir
# Directory with our validation AD dataset
self.validation_ad_dir = constants.validation_ad_dir
# Directory with our validation MCI dataset
self.validation_mci_dir =constants.validation_mci_dir
# Directory with our validation NC dataset
self.validation_nc_dir =constants.validation_nc_dir
self.nrows =constants.nrows
self.ncols = constants.ncols
def statistics(self):
#read_directory_file
print('total training AD Data:', self.readNiiFiles(self.train_ad_dir,augment_data=True,gan_train=True), end="\n")
# print('total training MCI Data:', len(self.readNiiFiles(self.train_mci_dir,augment_data=False)), end="\n")
# print('total training NC Data:', len(self.readNiiFiles(self.train_nc_dir,augment_data=True)), end="\n")
# print('total validation AD Data:', len(self.readNiiFiles(self.validation_ad_dir,augment_data=True)), end="\n")
# print('total validation MCI Data:', len(self.readNiiFiles(self.validation_mci_dir,augment_data=False)), end="\n")
# print('total validation NC Data:', len(self.readNiiFiles(self.validation_nc_dir,augment_data=False)), end="\n")
#print('total main validation AD Data:', len(self.readNiiFiles(constants.main_validation_ad_dir, augment_data=False,checker=9)), end="\n")
#print('total main validation MCI Data:', len(self.readNiiFiles(constants.main_validation_mci_dir, augment_data=False,checker=9)), end="\n")
#print('total main validation NC Data:', len(self.readNiiFiles(constants.main_validation_nc_dir, augment_data=False,checker=9)), end="\n")
def readNiiFiles(self,original_dir,augment_data=False,checker=8,gan_train=False):
# creating destinations folders
counter = 0
group_data = []
ad_class =original_dir.split(os.sep)[-1]
domain_class = original_dir.split(os.sep)[-2]
if domain_class =='train':
source = '1.5T'
elif domain_class =='validation':
source='3.0T'
elif domain_class == 'model_validate':
source='1.5T'
checker_file=""
for root, dir, f_names in os.walk(original_dir):
for f in f_names:
if f.lower().find(".nii") > -1:
sourcefile = os.path.join(root, f)
if sourcefile.find("skull_workflow") <= -1 :
for fgroup in self.image_group:
if fgroup in sourcefile:
if sourcefile.split(os.sep)[checker] != checker_file:
checker_file = sourcefile.split(os.sep)[checker]
try:
label =self.get_nii_group(ad_class)
source_label =self.get_nii_source(source)
if gan_train == False :
group_data.append([sourcefile, label, source_label])
if augment_data == True:
for l in range(constants.augment_factor):
aug_source_file = sourcefile.replace('.nii',str(l+1)+'_aug.nii')
group_data.append([aug_source_file, label, source_label])
#print("data" ,[sourcefile, label, source_label])
except (IOError, OSError) as e:
print(e.errno)
sys.exit(2)
return group_data
def readNiiFiles_2(self,original_dir,augment_data=False,gan_train=False,augment_factor=constants.augment_factor,limit_number=0):
# creating destinations folders
group_data = []
ad_class =original_dir.split(os.sep)[-1]
domain_class = original_dir.split(os.sep)[-2]
augment_counter=0
augment_pause=False
augment_limit=constants.max_class_value-limit_number
if domain_class =='train':
source = '1.5T'
elif domain_class =='validation':
source='3.0T'
elif domain_class == 'model_validate':
source='1.5T'
else:
source='1.5T'
for root, dir, f_names in os.walk(original_dir):
for f in f_names:
if f.lower().find(".nii") > -1:
sourcefile = os.path.join(root, f)
#check=sourcefile.split(os.sep)[9]
# check.lower().find("localizer") < 0
if True:
try:
label =self.get_nii_group(ad_class)
source_label =self.get_nii_source(source)
if gan_train == False :
group_data.append([sourcefile, label, source_label])
if augment_data == True:
if augment_pause==False:
aug_source_file = sourcefile.replace('.nii',str(1)+'_aug.nii')
group_data.append([aug_source_file, label, source_label])
augment_counter =augment_counter+1
if augment_counter==limit_number:
augment_pause=True
#print("data" ,[sourcefile, label, source_label])
except (IOError, OSError) as e:
print(e.errno)
sys.exit(2)
augment_pause=False
augment_counter=0
return group_data
def read_directory_file(self,original_dir,dx_group,source=None):
group_data = []
try:
if original_dir is not None:
for file in os.listdir(original_dir):
if os.path.isdir(os.path.join(original_dir,file)):
filepath = os.path.join(original_dir, file)
for sub_file in os.listdir(filepath):
if os.path.isdir(os.path.join(filepath,sub_file)):
filepath2 = os.path.join(filepath, sub_file)
for sub_file2 in os.listdir(filepath2):
decision_check=None
if self.strict_match :
decision_check= sub_file2.strip() in self.image_group
#str(self.image_group) == str(sub_file.strip())
if decision_check==True :
modal_groupings_source=os.path.join(filepath2 ,sub_file2)
files_in_time = len(os.listdir(modal_groupings_source))
if files_in_time > 1:
time_file=max(os.listdir(modal_groupings_source))
else:
time_file=os.listdir(modal_groupings_source)[0]
time_grouping_source=os.path.join(modal_groupings_source ,time_file)
files_in_time=len(os.listdir(modal_groupings_source))
for image_file in os.listdir(time_grouping_source):
image_grouping_source = os.path.join(time_grouping_source, image_file)
if os.path.isdir(os.path.join(image_grouping_source)):
if os.path.isdir(os.path.join(image_grouping_source, "skull_workflow")):
image_grouping_source=os.path.join(image_grouping_source,"skull_workflow")
if os.path.isdir(os.path.join(image_grouping_source,"BSE")):
image_grouping_source = os.path.join(image_grouping_source,"BSE")
for image_file in os.listdir(image_grouping_source):
pattern="*.bse.nii.gz"
if fnmatch.fnmatch(image_file,pattern):
image_grouping_source_file=os.path.join(image_grouping_source,image_file)
label=self.get_nii_group(dx_group)
source_label=self.get_nii_source(source)
group_data.append([image_grouping_source_file,label,source_label])
image_grouping_source=""
image_grouping_source_file=""
except OSError as e:
print('Error: %s' % e)
return group_data
def try_replace(self):
st='/HDD/public/data/ADNI_NEW_DATASET/train/MCI/016_S_11493_aug.nii'
#st.replace('_aug.nii', '.nii')
if st.find("_aug.nii") > -1:
st = re.sub("\d_aug.nii",'.nii',st)
return st
def convert_nii_to_image_data(self,nii_path):
if nii_path.find("_aug.nii") > -1:
nii_path = re.sub("\d_aug.nii",'.nii',nii_path)
image_load = nib.load(nii_path, mmap=False)
img_data = self.dataAugment(np.asarray(image_load.get_data()[:,:,:,0]))
else:
image_load = nib.load(nii_path, mmap=False)
#use_shape=image_load
img_data = np.asarray(image_load.get_data()[:,:,:,0])
shape = img_data.shape
#print(shape)
#img_data = img_data[65:shape[0]-65,30:shape[1]-30, int(shape[2]/2)-40:int(shape[2]/2)+40]
#img_data = img_data[45:105, 40:170, 55:115]
#img_data = img_data[int(shape[2] / 2) - 65:int(shape[2] / 2) + 65,int(shape[2] / 2) - 70:int(shape[2] / 2) + 50, int(shape[2] / 2) - 40:int(shape[2] / 2) + 40]
#img_data = img_data[0:90, 0:90,:]
img_data = img_data.astype(np.float32)
img_data -= np.mean(img_data)
img_data/=np.maximum(np.std(img_data),10**(-5))
img_data = np.pad(img_data, pad_width=((int(np.ceil(1.0 * (constants.img_shape_tuple[0] - shape[0]) / 2)),
int(np.floor(1.0 * (constants.img_shape_tuple[0] - shape[0]) / 2))), (
int(np.ceil(1.0 * (constants.img_shape_tuple[1] - shape[1]) / 2)),
int(np.floor(1.0 * (constants.img_shape_tuple[1] - shape[1]) / 2))),
(int(np.ceil(1.0 * (constants.img_shape_tuple[2] - shape[2]) / 2)),
int(np.floor(1.0 * (constants.img_shape_tuple[2] - shape[2]) / 2)))),
mode="constant", constant_values=(0, 0))
#print("shape",img_data.shape)
# img_to_array
# seg=iaa.Sequential([
# iaa.BilateralBlur(d=(3,10), sigma_color=(10,255), sigma_space=(10,255))
# ])
#
# #print(np.resize(img_data,constants.img_bilateral_filter).shape)
#
# filter_bilateral=seg.augment_image(np.resize(np.uint8(img_data),constants.img_bilateral_filter))
#determine_shape = np.resize(img_data, self.img_shape_tuple)
#print("data",normalise_zero_one(determine_shape+ np.random.normal(0, 0.05, determine_shape.shape))) normalise_zero_one(img_data)
return img_data
#normalise_zero_one(
#determine_shape*(1./255)
def convert_nii_to_image_dataK(self,nii_path):
if nii_path.find("_aug.nii") > -1:
nii_path = re.sub("\d_aug.nii",'.nii',nii_path)
image_load = nib.load(nii_path, mmap=False)
img_data = self.dataAugment(np.asarray(image_load.get_data()))
else:
image_load = nib.load(nii_path, mmap=False)
img_data = np.asarray(image_load.get_data())
K_SHAPE=(self.img_shape_tuple[0],self.img_shape_tuple[1],self.img_shape_tuple[2],self.img_channel)
#img_to_array
determine_shape = np.resize(img_data,K_SHAPE)
#print("data",normalise_zero_one(determine_shape+ np.random.normal(0, 0.05, determine_shape.shape)))
return normalise_zero_one(determine_shape)
def convert_nii_to_image_real(self,nii_path):
if nii_path.find("_aug.nii") > -1:
nii_path = re.sub("\d_aug.nii",'.nii',nii_path)
image_load = nib.load(nii_path, mmap=False)
img_data = np.asarray(image_load.get_data())
else:
image_load = nib.load(nii_path, mmap=False)
img_data = np.asarray(image_load.get_data())
#img_to_array
determine_shape = np.resize(img_data, self.img_shape_tuple)
#print("data",normalise_zero_one(determine_shape+ np.random.normal(0, 0.05, determine_shape.shape)))
return normalise_zero_one(determine_shape)
#normalise_zero_one(
#determine_shape*(1./255)
def one_hot_encode(self,vec):
'''
For use to one-hot encode the 3- possible labels
'''
n_classes=len(constants.classify_group)
return np.eye(n_classes)[vec]
def one_hot_encode_d(self,vec):
'''
For use to one-hot encode the 3- possible labels
'''
n_classes=2
return np.eye(n_classes)[vec]
def get_nii_group(self,nii_path):
img_label=nii_path
if img_label=="AD":
label=1
elif img_label=="NC":
label=2
elif img_label=="MCI":
label=0
return label
def get_nii_source(self, source_target):
source_label = source_target
if source_label == "1.5T":
source_label = 0
elif source_label == "3.0T":
source_label = 1
return source_label
def all_source_data(self,augment_data=False):
if "AD" in constants.classify_group:
all_ad_train = self.readNiiFiles_2(self.train_ad_dir,augment_data=augment_data,limit_number=264)
if "MCI" in constants.classify_group:
all_mci_train = self.readNiiFiles_2(self.train_mci_dir,augment_data=augment_data,limit_number=320)
if "NC" in constants.classify_group:
all_nc_train = self.readNiiFiles_2(self.train_nc_dir,augment_data=augment_data,limit_number=320)
data_feed=[]
for groups in constants.classify_group:
if groups=="AD":
data_feed=data_feed+all_ad_train
elif groups=="MCI":
data_feed = data_feed + all_mci_train
elif groups=="NC":
data_feed=data_feed + all_nc_train
all_source = [img_path for i, img_path in enumerate(data_feed)]
all_source = np.array(all_source)
self.set_random_seed(random.random_integers(1200))
return self.shuffle(all_source)
def all_source_gan(self, augment_data=False):
if "AD" in constants.classify_group:
all_ad_train = self.readNiiFiles_2(self.train_ad_dir, augment_data=False,gan_train=False)
if "MCI" in constants.classify_group:
all_mci_train = self.readNiiFiles_2(self.train_mci_dir, augment_data=False,gan_train=False)
if "NC" in constants.classify_group:
all_nc_train = self.readNiiFiles_2(self.train_nc_dir, augment_data=False,gan_train=False)
data_feed = []
for groups in constants.classify_group:
if groups == "AD":
data_feed = data_feed + all_ad_train
elif groups == "MCI":
data_feed = data_feed + all_mci_train
elif groups == "NC":
data_feed = data_feed + all_nc_train
all_source = [img_path for i, img_path in enumerate(data_feed)]
all_source = np.array(all_source)
self.set_random_seed(random.random_integers(1200))
return self.shuffle(all_source)
def all_main_validate(self, augment_data=False):
if "AD" in constants.classify_group:
all_ad_val = self.readNiiFiles_2(constants.main_validation_ad_dir, augment_data=augment_data)
if "MCI" in constants.classify_group:
all_mci_val = self.readNiiFiles_2(constants.main_validation_mci_dir, augment_data=augment_data)
if "NC" in constants.classify_group:
all_nc_val = self.readNiiFiles_2(constants.main_validation_nc_dir, augment_data=augment_data)
data_feed = []
for groups in constants.classify_group:
if groups == "AD":
data_feed = data_feed + all_ad_val
elif groups == "MCI":
data_feed = data_feed + all_mci_val
elif groups == "NC":
data_feed = data_feed + all_nc_val
all_val = [img_path for i, img_path in enumerate(data_feed)]
all_val = np.array(all_val)
self.set_random_seed( | random.random_integers(1200) | numpy.random.random_integers |
import pytest
pytest.importorskip('numpy')
import numpy as np
import pytest
import dask.array as da
from dask.array.utils import assert_eq
def test_linspace():
darr = da.linspace(6, 49, chunks=5)
nparr = np.linspace(6, 49)
assert_eq(darr, nparr)
darr = da.linspace(1.4, 4.9, chunks=5, num=13)
nparr = np.linspace(1.4, 4.9, num=13)
assert_eq(darr, nparr)
darr = da.linspace(6, 49, chunks=5, dtype=float)
nparr = np.linspace(6, 49, dtype=float)
assert_eq(darr, nparr)
darr = da.linspace(1.4, 4.9, chunks=5, num=13, dtype=int)
nparr = np.linspace(1.4, 4.9, num=13, dtype=int)
assert_eq(darr, nparr)
assert (sorted(da.linspace(1.4, 4.9, chunks=5, num=13).dask) ==
sorted(da.linspace(1.4, 4.9, chunks=5, num=13).dask))
assert (sorted(da.linspace(6, 49, chunks=5, dtype=float).dask) ==
sorted(da.linspace(6, 49, chunks=5, dtype=float).dask))
def test_arange():
darr = da.arange(77, chunks=13)
nparr = np.arange(77)
assert_eq(darr, nparr)
darr = da.arange(2, 13, chunks=5)
nparr = np.arange(2, 13)
assert_eq(darr, nparr)
darr = da.arange(4, 21, 9, chunks=13)
nparr = np.arange(4, 21, 9)
assert_eq(darr, nparr)
# negative steps
darr = da.arange(53, 5, -3, chunks=5)
nparr = np.arange(53, 5, -3)
assert_eq(darr, nparr)
darr = da.arange(77, chunks=13, dtype=float)
nparr = np.arange(77, dtype=float)
assert_eq(darr, nparr)
darr = da.arange(2, 13, chunks=5, dtype=int)
nparr = np.arange(2, 13, dtype=int)
assert_eq(darr, nparr)
assert (sorted(da.arange(2, 13, chunks=5).dask) ==
sorted(da.arange(2, 13, chunks=5).dask))
assert (sorted(da.arange(77, chunks=13, dtype=float).dask) ==
sorted(da.arange(77, chunks=13, dtype=float).dask))
# 0 size output
darr = da.arange(0, 1, -0.5, chunks=20)
nparr = np.arange(0, 1, -0.5)
assert_eq(darr, nparr)
darr = da.arange(0, -1, 0.5, chunks=20)
nparr = np.arange(0, -1, 0.5)
assert_eq(darr, nparr)
def test_arange_has_dtype():
assert da.arange(5, chunks=2).dtype == | np.arange(5) | numpy.arange |
import glob
import subprocess
import numpy as np
import dynapack.LSwrite as LSw
from fnmatch import filter as fltr
import matplotlib.pyplot as plt
""" Functions for post-processing of LS-DYNA files """
def readrcforc( Inputfilename):
""" Function to read rcforc files from LS-DYNA
Run as
rc = readrcforc('rcforc')
Plot as
plt.plot(rc[contactID,forcedir,:])
"""
ncontacts=0
contactnames = []
f = open(Inputfilename,'r')
for i in range(0,5): dummy = f.readline()
# Read to find number of contacts
temp=0
while temp>-1:
line = f.readline()
if 'END' in line:
temp = -5
line = f.readline()
else:
ncontacts = ncontacts + 1
s=' '
seq = line.strip().split()[1:]
contactnames.append(s.join(seq))
# Create 2d list for data storage
rcforc= [[] for i in range(ncontacts)]
# Read reaction forces for each contact
temp=0
while temp>-1:
for i in range(0,ncontacts*2):
line = f.readline()
if 'master' in line:
#print line
contactID = int(float(line.strip().split()[1]))
time = float(line.strip().split()[3])
Fx = float(line.strip().split()[5])
Fy = float(line.strip().split()[7])
Fz = float(line.strip().split()[9])
Mx = float(line.strip().split()[13])
My = float(line.strip().split()[15])
Mz = float(line.strip().split()[17])
Fres = np.sqrt(Fx**2 + Fy**2 + Fz**2)
rcforc[contactID-1].append([Fx, Fy, Fz, Mx, My, Mz, time, Fres])
if len(line)==0:
temp=-5
f.close()
# Sort into better array
def column(matrix, i): #Extract column from array
return [row[i] for row in matrix]
def rclen(rc): # Find length of rcforc file
rclengths = np.zeros(len(rcforc))
for i in range(0,len(rcforc)):
rclengths[i] = len(column(rcforc[i],0))
return int(np.max(rclengths))
temp = np.zeros((ncontacts, 8, rclen(rcforc)))
for i in range(0,ncontacts):
if len(rcforc[i])>0:
for j in range(0,8):
temp[i,j,:] = column(rcforc[i],j)
return temp
def readnodout( Inputfilename, stringname = ''):
"""
Script to read nodout-files from LS-DYNA
Run as
nodout = readnodout('nodout')
nodoutID = readnodout('nodout', 'stringname')
Plot as
plt.plot(nodout[:,nodeID,variableID])
Variables :
0 : time
1 : nodal_point
2 : x-disp
3 : y-disp
4 : z-disp
5 : x-vel
6 : y-vel
7 : z-vel
8 : x-accl
9 : y-accl
10 : z-accl
11 : x-coor
12 : y-coor
13 : z-coor
"""
import numpy as np
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
# Read file to get node index heading
f = open(Inputfilename,'r')
temp = 0
val = []
while temp==0:
line = f.readline()
if '{BEGIN LEGEND}' in line:
temp=2
line = f.readline() # heading
while temp==2:
line = f.readline()
if '{END LEGEND}' in line:
temp=3
else:
val.append(line.strip())
elif 'n o d a l p r i n t o u t f o r' in line: # abort if no names
temp=1
# Read file to get number of elements
f = open(Inputfilename,'r')
temp = 0
nodes = []
while temp==0:
line = f.readline()
if 'nodal point' in line:
temp=2
nnode = 0
while temp==2:
line = f.readline()
if len(line.split()) == 0:
temp=3
else:
nnode += 1
nodes.append(int(line.split()[0]))
nodes = np.array(nodes)
f.close()
# Find match between element ID and index
selval = []
if len(val) == nnode: # Then all elements have title
for ID, v in enumerate(val):
if stringname in v:
selval.append(ID)
else:
for ID, v in enumerate(val): # Then we find matching node index
if stringname in v:
selval.append(np.where(nodes == int(v.split()[0]))[0][0])
if stringname == '':
values = []
# Read data
f = open(Inputfilename,'r')
temp = 0
while temp==0:
line = f.readline()
if 'n o d a l' in line: # Then we have a new time step
time = line.split()[-2]
vval = []
for i in range(0,2): dummy = f.readline()
for i in range(0,nnode):
line = f.readline()
nodeID = line.split()[0]
templist = list(chunks(line[10:],12))[:-1]
templist = [float(i) for i in np.concatenate(([float(time), nodeID], templist))]
vval.append(templist)
teststop = 0
for i in range(0,5):
dummy = f.readline()
teststop += len(dummy)
if teststop == 0:
temp=3 #end of file
values.append(vval)
if len(line) == 0:
temp=3
f.close()
if stringname == '': #then return nodout values
ret = | np.array(values) | numpy.array |
"""
sparse tables
=============
Might look like this:
level 1 level 2 level 3
columns columns columns
idx a b c d e f idx g h i j k l idx m n o p
___ _ _ _ _ _ _ ___ _ _ _ _ _ _
|_0_|_|_|_|_|_|_||_0_|_|_|_|_|_|_|
|_1_|_|_|_|_|_|_|
|_2_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|_3_|_|_|_|_|_|_||_3_|_|_|_|_|_|_|
|_4_|_|_|_|_|_|_||_4_|_|_|_|_|_|_| ___ _ _ _ _
|_5_|_|_|_|_|_|_||_5_|_|_|_|_|_|_||_5_|_|_|_|_|
|_6_|_|_|_|_|_|_|
|_7_|_|_|_|_|_|_|
|_8_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|_9_|_|_|_|_|_|_||_9_|_|_|_|_|_|_|
|10_|_|_|_|_|_|_||10_|_|_|_|_|_|_|
|11_|_|_|_|_|_|_| ___ _ _ _ _ _ _ ___ _ _ _ _
|12_|_|_|_|_|_|_||12_|_|_|_|_|_|_||12_|_|_|_|_|
|13_|_|_|_|_|_|_| ___ _ _ _ _ _ _
|14_|_|_|_|_|_|_||14_|_|_|_|_|_|_|
Can be represented in memory like this:
level 1 level 2 level 3
columns columns columns
idx a b c d e f idx g h i j k l idx m n o p
___ _ _ _ _ _ _ ___ _ _ _ _ _ _ ___ _ _ _ _
|_0_|_|_|_|_|_|_| |_0_|_|_|_|_|_|_| |_5_|_|_|_|_|
|_1_|_|_|_|_|_|_| |_3_|_|_|_|_|_|_| |12_|_|_|_|_|
|_2_|_|_|_|_|_|_| |_4_|_|_|_|_|_|_|
|_3_|_|_|_|_|_|_| |_9_|_|_|_|_|_|_|
|_4_|_|_|_|_|_|_| |10_|_|_|_|_|_|_|
|_6_|_|_|_|_|_|_| |12_|_|_|_|_|_|_|
|_7_|_|_|_|_|_|_| |14_|_|_|_|_|_|_|
|_8_|_|_|_|_|_|_|
|_9_|_|_|_|_|_|_|
|10_|_|_|_|_|_|_|
|11_|_|_|_|_|_|_|
|12_|_|_|_|_|_|_|
|13_|_|_|_|_|_|_|
|14_|_|_|_|_|_|_|
Written to tape-archive
table.tar
|_ level_1/idx
|_ level_1/column_a
|_ level_1/column_b
|_ level_1/column_c
|_ level_1/column_d
|_ level_1/column_e
|_ level_1/column_f
|_ level_2/idx
|_ level_2/column_g
|_ level_2/column_h
|_ level_2/column_i
|_ level_2/column_j
|_ level_2/column_k
|_ level_2/column_l
|_ level_3/idx
|_ level_3/column_m
|_ level_3/column_n
|_ level_3/column_o
|_ level_3/column_p
"""
import pandas as pd
import numpy as np
import tarfile
import io
import shutil
import tempfile
import os
IDX = "idx"
IDX_DTYPE = "<u8"
LEVEL_COLUMN_DELIMITER = "/"
FILEAME_TEMPLATE = "{:s}" + LEVEL_COLUMN_DELIMITER + "{:s}.{:s}"
DTYPES = [
"<u1",
"<u2",
"<u4",
"<u8",
"<i1",
"<i2",
"<i4",
"<i8",
"<f2",
"<f4",
"<f8",
]
# logical operations
# ==================
def intersection(list_of_lists_of_indices):
"""
Returns the common indices among the lists of indices.
Example
-------
[4, 5, 6] = intersection([[1,2,3,4,5,6], [3,4,5,6,7,8], [4,5,6,7,8,9,10]])
"""
inter = list_of_lists_of_indices[0]
for i in range(len(list_of_lists_of_indices)):
inter = np.intersect1d(inter, list_of_lists_of_indices[i])
return inter
def cut_level_on_indices(level, indices, column_keys=None):
"""
Returns a level (recarray) only containing the row-indices in 'indices'.
Parameters
----------
level : recarray
A level in a sparse table.
indices : list
The row-indices to be written to the output-level.
column_keys : list of strings (None)
When specified, only these columns will be in the output-level.
"""
if column_keys is None:
column_keys = list(level.dtype.names)
column_keys.append(IDX)
_part = {}
for column_key in column_keys:
_part[column_key] = level[column_key]
part_df = pd.DataFrame(_part)
del _part
common_df = pd.merge(
part_df,
pd.DataFrame(dict_to_recarray({IDX: indices})),
on=IDX,
how="inner",
)
del part_df
return common_df.to_records(index=False)
def cut_table_on_indices(table, common_indices, level_keys=None):
"""
Returns table but only with the rows listed in common_indices.
Parameters
----------
table : dict of recarrays.
The sparse numeric table.
common_indices : list of indices
The row-indices to cut on. Only row-indices in this list will go
in the output-table.
level_keys : list of strings (None)
When provided, the output-table will only contain these levels.
"""
if level_keys is None:
level_keys = list(table.keys())
out = {}
for level_key in level_keys:
out[level_key] = cut_level_on_indices(
level=table[level_key], indices=common_indices,
)
return out
def sort_table_on_common_indices(table, common_indices):
"""
Returns a table with all row-indices ordered same as common_indices.
table : dict of recarrays.
The table. But must be rectangular, i.e. not sparse.
common_indices : list of indices
The row-indices to sort by.
"""
common_order_args = | np.argsort(common_indices) | numpy.argsort |
"""
Produce calibrated light curves.
``SDTlcurve`` is a script that, given a list of cross scans from different
sources, recognizes the calibrators among them and uses them to convert the
observed counts into flux density values in Jy.
"""
import os
import sys
import glob
import re
import warnings
import traceback
import configparser
import copy
import numpy as np
from astropy import log
import astropy.units as u
from scipy.optimize import curve_fit
from astropy.table import Table, Column
from .scan import Scan, list_scans
from .read_config import read_config, sample_config_file, get_config_file
from .fit import fit_baseline_plus_bell
from .io import mkdir_p
from .utils import standard_string, standard_byte, compare_strings
from .utils import HAS_STATSM, calculate_moments, scantype
try:
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
HAS_MPL = True
except ImportError:
HAS_MPL = False
CALIBRATOR_CONFIG = None
__all__ = ["CalibratorTable", "read_calibrator_config"]
def _constant(x, p):
return p
FLUX_QUANTITIES = {"Jy/beam": "Flux",
"Jy/pixel": "Flux Integral",
"Jy/sr": "Flux Integral"}
def _get_flux_quantity(map_unit):
try:
return FLUX_QUANTITIES[map_unit]
except Exception:
raise ValueError("Incorrect map_unit for flux conversion. Use one "
"of {}".format(list(FLUX_QUANTITIES.keys())))
def read_calibrator_config():
"""Read the configuration of calibrators in data/calibrators.
Returns
-------
configs : dict
Dictionary containing the configuration for each calibrator. Each key
is the name of a calibrator. Each entry is another dictionary, in one
of the following formats:
1) {'Kind' : 'FreqList', 'Frequencies' : [...], 'Bandwidths' : [...],
'Fluxes' : [...], 'Flux Errors' : [...]}
where 'Frequencies' is the list of observing frequencies in GHz,
'Bandwidths' is the list of bandwidths in GHz, 'Fluxes' is the list of
flux densities in Jy from the literature and 'Flux Errors' are the
uncertainties on those fluxes.
2) {'Kind' : 'CoeffTable', 'CoeffTable':
{'coeffs' : 'time, a0, a0e, a1, a1e, a2, a2e, a3, a3e\n2010.0,0 ...}}
where the 'coeffs' key contains a dictionary with the table of
coefficients a la Perley & Butler ApJS 204, 19 (2013), as a
comma-separated string.
See Also
--------
srttools.calibration.flux_function
Examples
--------
>>> calibs = read_calibrator_config() # doctest: +ELLIPSIS
INFO...
>>> calibs['DummyCal']['Kind']
'CoeffTable'
>>> 'coeffs' in calibs['DummyCal']['CoeffTable']
True
"""
flux_re = re.compile(r'^Flux')
curdir = os.path.dirname(__file__)
calibdir = os.path.join(curdir, 'data', 'calibrators')
calibrator_file_list = glob.glob(os.path.join(calibdir, '*.ini'))
configs = {}
for cfile in calibrator_file_list:
cparser = configparser.ConfigParser()
cparser.read(cfile)
log.info(f"Reading {cfile}")
if 'CoeffTable' not in list(cparser.sections()):
configs[cparser.get("Info", "Name")] = {"Kind": "FreqList",
"Frequencies": [],
"Bandwidths": [],
"Fluxes": [],
"Flux Errors": []}
for section in cparser.sections():
if not flux_re.match(section):
continue
configs[cparser.get("Info", "Name")]["Frequencies"].append(
float(cparser.get(section, "freq")))
configs[cparser.get("Info", "Name")]["Bandwidths"].append(
float(cparser.get(section, "bwidth")))
configs[cparser.get("Info", "Name")]["Fluxes"].append(
float(cparser.get(section, "flux")))
configs[cparser.get("Info", "Name")]["Flux Errors"].append(
float(cparser.get(section, "eflux")))
else:
configs[cparser.get("Info", "Name")] = \
{"CoeffTable": dict(cparser.items("CoeffTable")),
"Kind": "CoeffTable"}
return configs
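# A hedged example of a calibrator .ini file in format (1) above (the values
# are hypothetical; only the structure matches what the parser expects -- an
# [Info] section with a Name, plus one [Flux*] section per frequency):
#
#   [Info]
#   Name = MyCal
#
#   [Flux7GHz]
#   freq = 7.0
#   bwidth = 0.5
#   flux = 5.6
#   eflux = 0.1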
def _get_calibrator_flux(calibrator, frequency, bandwidth=1, time=0):
global CALIBRATOR_CONFIG
log.info(f"Getting calibrator flux from {calibrator}")
if CALIBRATOR_CONFIG is None:
CALIBRATOR_CONFIG = read_calibrator_config()
calibrators = CALIBRATOR_CONFIG.keys()
for cal in calibrators:
if cal == calibrator:
calibrator = cal
break
else:
return None, None
conf = CALIBRATOR_CONFIG[calibrator]
# find closest value among frequencies
if conf["Kind"] == "FreqList":
idx = (np.abs(np.array(conf["Frequencies"]) - frequency)).argmin()
return conf["Fluxes"][idx], \
conf["Flux Errors"][idx]
elif conf["Kind"] == "CoeffTable":
return _calc_flux_from_coeffs(conf, frequency, bandwidth, time)
def _treat_scan(scan_path, plot=False, **kwargs):
scandir, sname = os.path.split(scan_path)
if plot and HAS_MPL:
outdir = os.path.splitext(sname)[0] + "_scanfit"
outdir = os.path.join(scandir, outdir)
mkdir_p(outdir)
try:
# For now, use nosave. HDF5 doesn't store meta, essential for
# this
scan = Scan(scan_path, norefilt=True, nosave=True, **kwargs)
except KeyError as e:
log.warning(
"Missing key. Bad file? {}: {}".format(sname, str(e))
)
return False, None
except Exception as e:
log.warning(
"Error while processing {}: {}".format(sname, str(e))
)
log.warning(traceback.format_exc())
return False, None
feeds = np.arange(scan['ra'].shape[1])
chans = scan.chan_columns()
chan_nums = np.arange(len(chans))
F, N = np.meshgrid(feeds, chan_nums)
F = F.flatten()
N = N.flatten()
rows = []
for feed, nch in zip(F, N):
channel = chans[nch]
ras = np.degrees(scan['ra'][:, feed])
decs = np.degrees(scan['dec'][:, feed])
els = np.degrees(scan['el'][:, feed])
azs = np.degrees(scan['az'][:, feed])
time = np.mean(scan['time'][:])
el = np.mean(els)
az = np.mean(azs)
source = scan.meta['SOURCE']
pnt_ra = np.degrees(scan.meta['RA'])
pnt_dec = np.degrees(scan.meta['Dec'])
frequency = scan[channel].meta['frequency']
bandwidth = scan[channel].meta['bandwidth']
temperature = scan[channel + '-Temp']
y = scan[channel]
# Fit for gain curves
x, _ = scantype(ras, decs, els, azs)
temperature_model, _ = \
fit_baseline_plus_bell(x, temperature, kind='gauss')
source_temperature = temperature_model['Bell'].amplitude.value
# Fit RA and/or Dec
x, scan_type = scantype(ras, decs)
model, fit_info = fit_baseline_plus_bell(x, y, kind='gauss')
try:
uncert = fit_info['param_cov'].diagonal() ** 0.5
except Exception:
message = fit_info['message']
warnings.warn(
"Fit failed in scan {s}: {m}".format(s=sname,
m=message))
continue
bell = model['Bell']
baseline = model['Baseline']
# pars = model.parameters
pnames = model.param_names
counts = model.amplitude_1.value
backsub = y - baseline(x)
moments = calculate_moments(backsub)
skewness = moments['skewness']
kurtosis = moments['kurtosis']
if scan_type.startswith("RA"):
fit_ra = bell.mean.value
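            # A scan along RA spans an on-sky angle compressed by cos(Dec);
            # scale the fitted width so it is a true angular width on the sky.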
fit_width = bell.stddev.value * np.cos(np.radians(pnt_dec))
fit_dec = None
ra_err = fit_ra * u.degree - pnt_ra
dec_err = None
fit_mean = fit_ra
fit_label = 'RA'
pnt = pnt_ra
elif scan_type.startswith("Dec"):
fit_ra = None
fit_dec = bell.mean.value
fit_width = bell.stddev.value
dec_err = fit_dec * u.degree - pnt_dec
ra_err = None
fit_mean = fit_dec
fit_label = 'Dec'
pnt = pnt_dec
else:
raise ValueError("Unknown scan type")
index = pnames.index("amplitude_1")
counts_err = uncert[index]
index = pnames.index("stddev_1")
width_err = uncert[index]
flux_density, flux_density_err = 0, 0
flux_over_counts, flux_over_counts_err = 0, 0
calculated_flux, calculated_flux_err = 0, 0
new_row = [scandir, sname, scan_type, source, channel, feed,
time, frequency, bandwidth, counts, counts_err,
fit_width, width_err,
flux_density, flux_density_err, el, az,
source_temperature,
flux_over_counts, flux_over_counts_err,
flux_over_counts, flux_over_counts_err,
calculated_flux, calculated_flux_err,
pnt_ra, pnt_dec, fit_ra, fit_dec, ra_err,
dec_err, skewness, kurtosis]
rows.append(new_row)
if plot and HAS_MPL:
fig = plt.figure("Fit information")
gs = GridSpec(2, 1, height_ratios=(3, 1))
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1], sharex=ax0)
ax0.plot(x, y, label="Data")
ax0.plot(x, bell(x),
label="Fit: Amp: {}, Wid: {}".format(counts, fit_width))
ax1.plot(x, y - bell(x))
ax0.axvline(fit_mean, label=fit_label + " Fit", ls="-")
ax0.axvline(pnt.to(u.deg).value, label=fit_label + " Pnt",
ls="--")
ax0.set_xlim([min(x), max(x)])
ax1.set_xlabel(fit_label)
ax0.set_ylabel("Counts")
ax1.set_ylabel("Residual (cts)")
ax0.legend()
ax1.legend()
plt.savefig(os.path.join(outdir,
"Feed{}_chan{}.png".format(feed,
nch)))
plt.close(fig)
fig = plt.figure("Fit information - temperature")
gs = GridSpec(2, 1, height_ratios=(3, 1))
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1], sharex=ax0)
ax0.plot(x, temperature, label="Data")
ax0.plot(x, temperature_model['Bell'](x), label="Fit")
ax1.plot(x, temperature - temperature_model['Bell'](x))
ax0.axvline(pnt.to(u.deg).value, label=fit_label + " Pnt",
ls="--")
ax0.set_xlim([min(x), max(x)])
ax1.set_xlabel(fit_label)
ax0.set_ylabel("Counts")
ax1.set_ylabel("Residual (cts)")
plt.legend()
plt.savefig(os.path.join(outdir,
"Feed{}_chan{}_temp.png".format(feed,
nch)))
plt.close(fig)
return True, rows
class CalibratorTable(Table):
"""Table composed of fitted and tabulated fluxes."""
def __init__(self, *args, **kwargs):
"""Initialize the object."""
Table.__init__(self, *args, **kwargs)
self.calibration_coeffs = {}
self.calibration_uncerts = {}
self.calibration = {}
names = ["Dir", "File", "Scan Type", "Source",
"Chan", "Feed", "Time",
"Frequency", "Bandwidth",
"Counts", "Counts Err",
"Width", "Width Err",
"Flux", "Flux Err",
"Elevation", "Azimuth",
"Source_temperature",
"Flux/Counts", "Flux/Counts Err",
"Flux Integral/Counts", "Flux Integral/Counts Err",
"Calculated Flux", "Calculated Flux Err",
"RA", "Dec",
"Fit RA", "Fit Dec",
"RA err", "Dec err",
"Skewness", "Kurtosis"]
        # Use builtin int/float here: the np.int/np.float aliases are
        # deprecated and were removed in NumPy 1.24.
        dtype = ['S200', 'S200', 'S200', 'S200',
                 'S200', int, np.double,
                 float, float,
                 float, float,
                 float, float,
                 float, float, float,
                 float, float,
                 float, float,
                 float, float,
                 float, float,
                 float, float,
                 float, float,
                 float, float,
                 float, float]
for n, d in zip(names, dtype):
if n not in self.keys():
self.add_column(Column(name=n, dtype=d))
def from_scans(self, scan_list=None, debug=False, freqsplat=None,
config_file=None, nofilt=False, plot=False):
"""Load source table from a list of scans.
For each scan, a fit is performed. Since we are assuming point-like
sources here, the fit is a Gaussian plus a slope. The centroid, width
and amplitude of the fit fill out new rows of the CalibratorTable
('Fit RA' or 'Fit Dec', 'Width' and 'Counts' respectively).
Parameters
----------
scan_list : list of str
List of files containing cross scans to be fitted
config_file : str
File containing the configuration (list of directories etc.)
Other parameters
----------------
debug : bool
Throw debug information
freqsplat : str
List of frequencies to be merged into one. See
:func:`srttools.scan.interpret_frequency_range`
nofilt : bool
Do not filter the noisy channels of the scan. See
:class:`srttools.scan.clean_scan_using_variability`
plot : bool
Plot diagnostic plots? Default False, True if debug is True.
Returns
-------
retval : bool
True if at least one scan was correctly processed
See Also
--------
srttools.scan.interpret_frequency_range
"""
if debug is True:
plot = True
if scan_list is None:
if config_file is None:
config_file = get_config_file()
config = read_config(config_file)
scan_list = \
list_scans(config['datadir'],
config['list_of_directories']) + \
list_scans(config['datadir'],
config['calibrator_directories'])
scan_list.sort()
nscan = len(scan_list)
out_retval = False
for i_s, s in enumerate(scan_list):
log.info('{}/{}: Loading {}'.format(i_s + 1, nscan, s))
retval, rows = _treat_scan(s, plot=plot, debug=debug,
freqsplat=freqsplat, nofilt=nofilt)
if retval:
out_retval = True
for r in rows:
self.add_row(r)
return out_retval
def write(self, fname, *args, **kwargs):
"""Same as Table.write, but adds path information for HDF5."""
        if fname.endswith('.hdf5'):
            # astropy's HDF5 writer needs a path to the table dataset inside
            # the file; default to 'table' when the caller does not give one.
            kwargs.setdefault('path', 'table')
        super(CalibratorTable, self).write(fname, *args, **kwargs)
def check_not_empty(self):
"""Check that table is not empty.
Returns
-------
good : bool
True if all checks pass, False otherwise.
"""
if len(self["Flux/Counts"]) == 0:
warnings.warn("The calibrator table is empty!")
return False
return True
def check_up_to_date(self):
"""Check that the calibration information is up to date.
Returns
-------
good : bool
True if all checks pass, False otherwise.
"""
if not self.check_not_empty():
return False
if np.any(self["Flux/Counts"] == 0):
warnings.warn("The calibrator table needs an update!")
self.update()
return True
def update(self):
"""Update the calibration information.
Execute ``get_fluxes``, ``calibrate`` and
``compute_conversion_function``
"""
if not self.check_not_empty():
return
self.get_fluxes()
self.calibrate()
self.compute_conversion_function()
def get_fluxes(self):
"""Get the tabulated flux of the source, if listed as calibrators.
Updates the table.
"""
if not self.check_not_empty():
return
for it, t in enumerate(self['Time']):
source = self['Source'][it]
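            # The division by 1000 converts the stored frequency/bandwidth
            # (MHz, by the convention used in this table) to the GHz expected
            # by the calibrator configuration.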
frequency = self['Frequency'][it] / 1000
bandwidth = self['Bandwidth'][it] / 1000
flux, eflux = \
_get_calibrator_flux(source, frequency, bandwidth, time=t)
self['Flux'][it] = flux
self['Flux Err'][it] = eflux
def calibrate(self):
"""Calculate the calibration constants.
The following conversion functions are calculated for each tabulated
cross scan belonging to a calibrator:
+ 'Flux/Counts' and 'Flux/Counts Err': Tabulated flux density divided
by the _height_ of the fitted Gaussian. This is used, e.g. to
calibrate images in Jy/beam, as it calibrates the local amplitude to
the flux density
+ 'Flux Integral/Counts' and 'Flux Integral/Counts Err': Tabulated flux
density divided by the _volume_ of the 2D Gaussian corresponding to
the fitted cross scans, assuming a symmetrical beam (which is
generally not the case, but a good approximation). This is used,
e.g., to perform the calibration in Jy/pixel: Each pixel will be
normalized to the expected total flux in the corresponding pixel
area
See Also
--------
srttools.calibration.CalibratorTable.from_scans
"""
if not self.check_not_empty():
return
flux = self['Flux'] * u.Jy
eflux = self['Flux Err'] * u.Jy
counts = self['Counts'] * u.ct
ecounts = self['Counts Err'] * u.ct
width = np.radians(self['Width']) * u.radian
ewidth = np.radians(self['Width Err']) * u.radian
# Volume in a beam: For a 2-d Gaussian with amplitude A and sigmas sx
# and sy, this is 2 pi A sx sy.
total = 2 * np.pi * counts * width ** 2
etotal = 2 * np.pi * ecounts * width ** 2
flux_integral_over_counts = flux / total
flux_integral_over_counts_err = \
(etotal / total + eflux / flux +
2 * ewidth / width) * flux_integral_over_counts
flux_over_counts = flux / counts
flux_over_counts_err = \
(ecounts / counts + eflux / flux) * flux_over_counts
self['Flux/Counts'][:] = \
flux_over_counts.to(u.Jy / u.ct).value
self['Flux/Counts Err'][:] = \
flux_over_counts_err.to(u.Jy / u.ct).value
self['Flux Integral/Counts'][:] = \
flux_integral_over_counts.to(u.Jy / u.ct / u.steradian).value
self['Flux Integral/Counts Err'][:] = \
flux_integral_over_counts_err.to(u.Jy / u.ct / u.steradian).value
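    # Worked example of the two factors above (hypothetical numbers, not from
    # a real calibrator): a 3 Jy source fitted with 600 counts and a Gaussian
    # width of 0.002 rad gives
    #   beam volume:           2 * pi * 600 ct * (0.002 rad)**2 ~= 1.51e-2 ct sr
    #   'Flux Integral/Counts' ~= 3 Jy / 1.51e-2 ct sr ~= 199 Jy / (ct sr)
    #   'Flux/Counts'          =  3 Jy / 600 ct = 5e-3 Jy / ct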
def compute_conversion_function(self, map_unit="Jy/beam", good_mask=None):
"""Compute the conversion between Jy and counts.
Try to get a meaningful second-degree polynomial fit over elevation.
Revert to the rough function :func:`Jy_over_counts_rough` in case
``statsmodels`` is not installed. In this latter case, only the baseline
value is given for flux conversion and error.
These values are saved in the ``calibration_coeffs`` and
``calibration_uncerts`` attributes of ``CalibratorTable``, and a
dictionary called ``calibration`` is also created. For each channel,
this dictionary contains either None or an object. This object is the
output of a ``fit`` procedure in ``statsmodels``. The method
object.predict(X) returns the calibration corresponding to elevation X.
"""
if not HAS_STATSM:
channels = list(set(self["Chan"]))
for channel in channels:
fc, fce = self.Jy_over_counts_rough(channel=channel,
map_unit=map_unit,
good_mask=None)
self.calibration_coeffs[channel] = [fc, 0, 0]
self.calibration_uncerts[channel] = \
[fce, 0, 0]
self.calibration[channel] = None
return
else:
import statsmodels.api as sm
if good_mask is None:
good_mask = self['Flux'] > 0
flux_quantity = _get_flux_quantity(map_unit)
channels = list(set(self["Chan"]))
for channel in channels:
good_chans = (self["Chan"] == channel) & good_mask
f_c_ratio = self[flux_quantity + "/Counts"][good_chans]
f_c_ratio_err = self[flux_quantity + "/Counts Err"][good_chans]
elvs = np.radians(self["Elevation"][good_chans])
good_fc = (f_c_ratio == f_c_ratio) & (f_c_ratio > 0)
good_fce = (f_c_ratio_err == f_c_ratio_err) & (f_c_ratio_err >= 0)
good = good_fc & good_fce
x_to_fit = np.array(elvs[good])
y_to_fit = np.array(f_c_ratio[good])
ye_to_fit = np.array(f_c_ratio_err[good])
order = np.argsort(x_to_fit)
x_to_fit = x_to_fit[order]
y_to_fit = y_to_fit[order]
ye_to_fit = ye_to_fit[order]
X = np.column_stack((np.ones(len(x_to_fit)), x_to_fit))
# X = np.c_[np.ones(len(x_to_fit)), X]
# X = sm.add_constant(X)
model = sm.RLM(y_to_fit, X, missing='drop')
results = model.fit()
self.calibration_coeffs[channel] = results.params
self.calibration_uncerts[channel] = \
results.cov_params().diagonal()**0.5
self.calibration[channel] = results
def Jy_over_counts(self, channel=None, elevation=None,
map_unit="Jy/beam", good_mask=None):
"""Compute the Jy/Counts conversion corresponding to a given map unit.
Parameters
----------
channel : str
Channel name (e.g. 'Feed0_RCP', 'Feed0_LCP' etc.)
elevation : float or array-like
The elevation or a list of elevations
map_unit : str
A valid unit for the calibrated map (See the keys of
FLUX_QUANTITIES)
good_mask : array of bools, default None
This mask can be used to specify the valid entries of the table.
If None, the mask is set to an array of True values
Returns
-------
fc : float or array-like
One conversion value for each elevation
fce : float or array-like
the uncertainties corresponding to each ``fc``
"""
rough = False
if not HAS_STATSM:
rough = True
if good_mask is None:
good_mask = self['Flux'] > 0
flux_quantity = _get_flux_quantity(map_unit)
if channel not in self.calibration.keys():
self.compute_conversion_function(map_unit, good_mask=good_mask)
if elevation is None or rough is True or channel is None:
elevation = np.array(elevation)
fc, fce = self.Jy_over_counts_rough(channel=channel,
map_unit=map_unit,
good_mask=good_mask)
if elevation.size > 1:
fc = np.zeros_like(elevation) + fc
fce = np.zeros_like(elevation) + fce
return fc, fce
X = np.column_stack((np.ones(np.array(elevation).size),
np.array(elevation)))
fc = self.calibration[channel].predict(X)
goodch = self["Chan"] == channel
good = good_mask & goodch
fce = np.sqrt(np.mean(
self[flux_quantity + "/Counts Err"][good]**2)) + np.zeros_like(fc)
if len(fc) == 1:
fc, fce = fc[0], fce[0]
return fc, fce
def Jy_over_counts_rough(self, channel=None, map_unit="Jy/beam",
good_mask=None):
"""Get the conversion from counts to Jy.
Other parameters
----------------
channel : str
Name of the data channel
map_unit : str
A valid unit for the calibrated map (See the keys of
FLUX_QUANTITIES)
good_mask : array of bools, default None
This mask can be used to specify the valid entries of the table.
If None, the mask is set to an array of True values
Returns
-------
fc : float
flux density /count ratio
fce : float
uncertainty on ``fc``
"""
self.check_up_to_date()
flux_quantity = _get_flux_quantity(map_unit)
if good_mask is None:
good_mask = self['Flux'] > 0
good_chans = np.ones(len(self["Time"]), dtype=bool)
if channel is not None:
good_chans = self['Chan'] == channel
good_chans = good_chans & good_mask
f_c_ratio = self[flux_quantity + "/Counts"][good_chans]
f_c_ratio_err = self[flux_quantity + "/Counts Err"][good_chans]
times = self["Time"][good_chans]
good_fc = (f_c_ratio == f_c_ratio) & (f_c_ratio > 0)
good_fce = (f_c_ratio_err == f_c_ratio_err) & (f_c_ratio_err >= 0)
good = good_fc & good_fce
x_to_fit = np.array(times[good])
y_to_fit = np.array(f_c_ratio[good])
ye_to_fit = np.array(f_c_ratio_err[good])
p = [np.median(y_to_fit)]
pcov = np.array([[np.median(ye_to_fit)**2]])
first = True
        log.debug("{} {} {}".format(x_to_fit, y_to_fit, ye_to_fit))
        while True:
bad = np.abs((y_to_fit - _constant(x_to_fit, p)) / ye_to_fit) > 5
if not np.any(bad) and not first:
break
if len(x_to_fit[bad]) > len(x_to_fit) - 5:
warnings.warn("Calibration fit is shaky")
break
xbad = x_to_fit[bad]
ybad = y_to_fit[bad]
for xb, yb in zip(xbad, ybad):
log.warning("Outliers: {}, {}".format(xb, yb))
good = np.logical_not(bad)
x_to_fit = x_to_fit[good]
y_to_fit = y_to_fit[good]
ye_to_fit = ye_to_fit[good]
p, pcov = curve_fit(_constant, x_to_fit, y_to_fit, sigma=ye_to_fit,
p0=p)
first = False
fc = p[0]
fce = np.sqrt(pcov[0, 0])
return fc, fce
def calculate_src_flux(self, channel=None,
map_unit="Jy/beam", source=None):
"""Calculate source flux and error, pointing by pointing.
Uses the conversion factors calculated from the tabulated fluxes for
all sources but the current, and the fitted Gaussian amplitude for the
current source.
Updates the calibrator table and returns the average flux
Parameters
----------
channel : str or list of str
Data channel
map_unit : str
Units in the map (default Jy/beam)
source : str
Source name. Must match one of the sources in the table.
Default
Returns
-------
mean_flux : array of floats
Array with as many channels as the input ones
mean_flux_err : array of floats
Uncertainties corresponding to mean_flux
"""
if source is None:
good_source = np.ones_like(self['Flux'], dtype=bool)
else:
good_source = self['Source'] == source
non_source = np.logical_not(good_source)
if channel is None:
channels = [s for s in set(self['Chan'])]
else:
channels = [channel]
mean_flux = []
mean_flux_err = []
for channel in channels:
good_chan = self['Chan'] == channel
good = good_source & good_chan
elevation = np.radians(self['Elevation'][good])
fc, fce = self.Jy_over_counts(channel=channel, elevation=elevation,
map_unit=map_unit,
good_mask=non_source)
calculated_flux = copy.deepcopy(self['Calculated Flux'])
calculated_flux_err = copy.deepcopy(self['Calculated Flux Err'])
counts = np.array(self['Counts'])
counts_err = np.array(self['Counts Err'])
calculated_flux[good] = counts[good] * fc
calculated_flux_err[good] = \
(counts_err[good] / counts[good] + fce / fc) * \
calculated_flux[good]
self['Calculated Flux'][:] = calculated_flux
self['Calculated Flux Err'][:] = calculated_flux_err
mean_flux.append(np.mean(calculated_flux[good]))
mean_flux_err.append(
np.sqrt(np.mean(calculated_flux_err[good] ** 2)))
return mean_flux, mean_flux_err
def check_consistency(self, channel=None, epsilon=0.05):
"""Check the consistency of calculated and fitted flux densities.
For each source in the ``srttools``' calibrator list, use
``calculate_src_flux`` to calculate the source flux ignoring the
tabulated value, and compare the calculated and tabulated values.
Returns
-------
retval : bool
True if, for all calibrators, the tabulated and calculated values
of the flux are consistent. False otherwise.
"""
is_cal = (~np.isnan(self['Flux']))&(self['Flux'] > 0)
calibrators = list(set(self['Source'][is_cal]))
for cal in calibrators:
self.calculate_src_flux(channel=channel, source=cal)
if channel is None:
good_chan = np.ones_like(self['Chan'], dtype=bool)
else:
good_chan = self['Chan'] == channel
calc_fluxes = self['Calculated Flux'][is_cal & good_chan]
biblio_fluxes = self['Flux'][is_cal & good_chan]
names = self['Source'][is_cal & good_chan]
times = self['Time'][is_cal & good_chan]
consistent = \
np.abs(biblio_fluxes - calc_fluxes) < epsilon * biblio_fluxes
        for n, t, b, c, cons in zip(
                names, times, biblio_fluxes, calc_fluxes, consistent):
if not cons:
warnings.warn("{}, MJD {}: Expected {}, "
"measured {}".format(n, t, b, c))
return consistent
def beam_width(self, channel=None):
"""Calculate the (weighted) mean beam width, in radians.
Checks for invalid (nan and such) values.
"""
goodch = np.ones(len(self), dtype=bool)
if channel is not None:
goodch = self['Chan'] == channel
allwidths = self[goodch]['Width']
allwidth_errs = self[goodch]['Width Err']
good = (allwidth_errs > 0) & (allwidth_errs == allwidth_errs)
allwidths = allwidths[good]
allwidth_errs = allwidth_errs[good]
# Weighted mean
        # weights 1 / err; normalize and return the weighted mean
        width = np.sum(allwidths / allwidth_errs) / np.sum(1 / allwidth_errs)
        return width
# General Packages
from math import atan2, degrees
from datetime import datetime
from pathlib import Path
import time
import pprint
import numpy as np
import pandas as pd
import pickle
# Plotting
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.dates import date2num
import seaborn as sns
# Scaling
from sklearn.preprocessing import StandardScaler
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# Paths
repo_dir = Path(__file__).absolute().parent.parent
paper_dir = repo_dir / 'paper/' # directory containing paper related info
data_dir = paper_dir / 'data/' # directory containing data files
results_dir = paper_dir / 'results/' # directory containing results
# create directories that don't exist
for d in [data_dir, results_dir]:
d.mkdir(exist_ok = True)
# Formatting Options
np.set_printoptions(precision = 4, suppress = False)
pd.set_option('display.max_columns', 30)
pd.options.mode.chained_assignment = None
pp = pprint.PrettyPrinter(indent = 4)
# Plotting Settings
sns.set(style="white", palette="muted", color_codes = True)
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 24
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rc('legend', fontsize = 20)
# file names
output_dir = results_dir / settings['data_name']
output_dir.mkdir(exist_ok = True)
if settings['normalize_data']:
settings['method_suffixes'].append('normalized')
if settings['force_rational_actions']:
settings['audit_suffixes'].append('rational')
# set file header
settings['dataset_file'] = '%s/%s_processed.csv' % (data_dir, settings['data_name'])
settings['file_header'] = '%s/%s_%s%s' % (output_dir, settings['data_name'], settings['method_name'], '_'.join(settings['method_suffixes']))
settings['audit_file_header'] = '%s%s' % (settings['file_header'], '_'.join(settings['audit_suffixes']))
settings['model_file'] = '%s_models.pkl' % settings['file_header']
settings['audit_file'] = '%s_audit_results.pkl' % settings['audit_file_header']
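# Worked expansion of the headers above (assuming the default flags, i.e.
# normalize_data=True and force_rational_actions=False):
#   '_'.join(['', 'normalized']) -> '_normalized', so
#   file_header = '<results_dir>/credit/credit_logreg_normalized'
#   audit_file  = '<results_dir>/credit/credit_logreg_normalized_audit_results.pkl'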
# Recourse Objects
from recourse.action_set import ActionSet
from recourse.builder import RecourseBuilder
from recourse.auditor import RecourseAuditor
from recourse.flipset import Flipset
### Helper Functions for Experimental Script
def load_data():
"""Helper function to load in data, and output that and optionally a scaler object:
Output:
data: dict with the following fields
outcome_name: Name of the outcome variable (inferred as the first column.)
variable_names: A list of names indicating input columns.
X: The input features for our model.
y: The column of the dataframe indicating our outcome variable.
scaler: The sklearn StandardScaler used to normalize the dataset, if we wish to scale.
X_scaled: Scaled version of X, if we wish to scale
X_train: The training set: set to the whole dataset if not scaled. Set to X_scaled if we do scale.
scaler:
Object used to scale data. If "scale" is set to None, then this is returned as None.
"""
# data set
data_df = pd.read_csv(settings['dataset_file'])
data = {
'outcome_name': data_df.columns[0],
'variable_names': data_df.columns[1:].tolist(),
'X': data_df.iloc[:, 1:],
'y': data_df.iloc[:, 0]
}
scaler = None
data['X_train'] = data['X']
data['scaler'] = None
    if settings['normalize_data']:
        scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
data['X_scaled'] = pd.DataFrame(scaler.fit_transform(data['X'].to_numpy(dtype=float), data['y'].values),
columns=data['X'].columns)
data['X_train'] = data['X_scaled']
data['scaler'] = scaler
return data, scaler
def undo_coefficient_scaling(clf = None, coefficients = None, intercept = 0.0, scaler = None):
"""
given coefficients and data for scaled data, returns coefficients and intercept for unnormalized data
w = w_scaled / sigma
b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu)
:param sklearn linear classifier
:param coefficients: vector of coefficients
:param intercept: scalar for the intercept function
    :param scaler: sklearn StandardScaler or None
:return: coefficients and intercept for unnormalized data
"""
if coefficients is None:
assert clf is not None
assert intercept == 0.0
assert hasattr(clf, 'coef_')
coefficients = clf.coef_
intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0
if scaler is None:
w = np.array(coefficients)
b = float(intercept)
else:
        assert isinstance(scaler, StandardScaler)
x_shift = np.array(scaler.mean_)
x_scale = np.sqrt(scaler.var_)
w = coefficients / x_scale
        b = intercept - np.dot(w, x_shift)
    return w, b
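# Minimal usage sketch for undo_coefficient_scaling (hypothetical variables,
# not defined in this script):
#
#     from sklearn.linear_model import LogisticRegression
#     scaler = StandardScaler().fit(X_raw)
#     clf = LogisticRegression().fit(scaler.transform(X_raw), y)
#     w, b = undo_coefficient_scaling(clf=clf, scaler=scaler)
#     # w @ x + b now scores raw, unscaled feature vectors x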
import zipfile
import os
import PIL.Image
from typing import List, Tuple, Dict
import numpy as np
from deep500.utils.download import real_download, unrar
from deep500.lv2.dataset import FileListDataset
from deep500.utils.onnx_interop.losses import SoftmaxCrossEntropy
# Optionally import PyAV
try:
    import av
except ImportError:  # ModuleNotFoundError is a subclass of ImportError
    av = None
def ucf101_shape():
return (101, None, 3, 240, 320)
def ucf101_loss():
return SoftmaxCrossEntropy
def download_ucf101_and_get_file_paths(folder='', split='01'):
"""
Download ucf101 from University of Central Florida
The archive contains the videos of different action classes
:return: paths to different files
"""
base_url = "https://www.crcv.ucf.edu/data/UCF101/"
filenames = [('ucf101', 'UCF101.rar'),
('ucf101_split','UCF101TrainTestSplits-RecognitionTask.zip')]
sub_folder = '/ucf101'
local_files = real_download(base_url, filenames, sub_folder, output_dir=folder)
files = unrar(local_files['ucf101'])
    # renamed from 'zip' to avoid shadowing the built-in
    zipf = zipfile.ZipFile(local_files['ucf101_split'])
    path = os.path.dirname(os.path.abspath(local_files['ucf101'])) + '/UCF-101/'
    train_files = []
    with zipf.open('ucfTrainTestlist/trainlist{}.txt'.format(split)) as file_split:
        for line in file_split:
            file = path + bytes.decode(line.split()[0])
            if file in files:
                train_files.append(file)
    test_files = []
    with zipf.open('ucfTrainTestlist/testlist{}.txt'.format(split)) as file_split:
        for line in file_split:
            file = path + bytes.decode(line.strip())
            if file in files:
                test_files.append(file)
    label_list = {}
    with zipf.open('ucfTrainTestlist/classInd.txt') as labels:
for line in labels:
line = bytes.decode(line.strip())
label = line.split()[1]
idx = int(line.split()[0]) - 1
label_list[label] = idx
return train_files, test_files, label_list
ucf101_mean = (0.39607886, 0.37930175, 0.351559)
ucf101_std = (0.28261574, 0.27613039, 0.28061599)
class ucf101_loader():
def __init__(self, normalize=True, max_length=1777, skip_frames=10):
if av is None:
raise ImportError('Cannot load ucf101 videos without PyAV. Please see '
'https://github.com/mikeboers/PyAV for installation instructions.')
self.normalize = normalize
self.max_length = max_length
self.skip_frames = skip_frames
def _video_loader(self, video_path):
container = av.open(video_path)
container.streams.video[0].thread_type = 'AUTO'
_data = [frame.to_ndarray(format='rgb24') for frame in container.decode(video=0)]
if _data[0].shape != (240, 320, 3):
_data = [np.array(PIL.Image.fromarray(img, 'RGB').resize((320,240))) for img in _data]
_data = np.asarray(_data, dtype=np.float32)
if self.normalize:
for ch in range(3):
_data[:,:,:,ch] -= ucf101_mean[ch]
_data[:,:,:,ch] /= ucf101_std[ch]
return _data
def __call__(self, data_path):
#load multiple videos
if type(data_path) is np.ndarray:
data = [self._video_loader(path) for path in data_path]
max_frames = max([x.shape[0] for x in data])
data = [np.pad(x, ((max_frames-x.shape[0],0), (0,0), (0,0), (0,0)), 'constant') for x in data]
            data = np.vstack([np.expand_dims(x, axis=0) for x in data])
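    # Worked check of the pad-then-stack pattern above (hypothetical shapes):
    # clips of shape (5, 2, 2, 3) and (3, 2, 2, 3) give max_frames = 5;
    # np.pad(..., ((5 - 3, 0), (0, 0), (0, 0), (0, 0))) front-pads the short
    # clip with zero frames, and np.vstack over np.expand_dims(..., axis=0)
    # yields a single batch of shape (2, 5, 2, 2, 3).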
#!/home/daniel/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
================================================
main_extract_trt
================================================
This program extracts individual TRT cell data from the original files
and puts it in a separate file for each cell
"""
# Author: fvj
# License: BSD 3 clause
import datetime
import argparse
import atexit
import os
import numpy as np
from pyrad.io import get_trtfile_list, read_trt_data, write_trt_cell_data
print(__doc__)
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# positional arguments
parser.add_argument(
'start_times', type=str,
help=('Start times of the data to process. Format YYYYMMDDhhmmss.' +
              'Comma separated'))
parser.add_argument(
'end_times', type=str,
help=('End times of the data to process. Format YYYYMMDDhhmmss.' +
              'Comma separated'))
# keyword arguments
parser.add_argument(
'--raw_trtbase', type=str,
default='/store/msrad/radar/rad4alp/TRT/',
help='name of folder containing the TRT cell data')
parser.add_argument(
'--proc_trtbase', type=str,
default='/store/msrad/radar/trt/',
help='name of folder containing the TRT cell data')
parser.add_argument(
'--nsteps_min', type=int,
default=3,
help=('Minimum number of time steps to consider the TRT cell ' +
'worth processing'))
args = parser.parse_args()
print("====== TRT cell extraction started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== TRT cell extraction finished: ")
start_time_list = args.start_times.split(',')
end_time_list = args.end_times.split(',')
for i, start_time_str in enumerate(start_time_list):
end_time_str = end_time_list[i]
starttime = datetime.datetime.strptime(start_time_str, '%Y%m%d%H%M%S')
endtime = datetime.datetime.strptime(end_time_str, '%Y%m%d%H%M%S')
data_input_path = (
args.raw_trtbase+starttime.strftime('%y%j/TRTC%y%j/'))
data_output_path = (
args.proc_trtbase+starttime.strftime('%Y-%m-%d')+'/TRTC_cell/')
if not os.path.isdir(data_output_path):
os.makedirs(data_output_path)
flist = get_trtfile_list(data_input_path, starttime, endtime)
if flist is None:
continue
        traj_ID = np.array([], dtype=int)
import numpy as np
import pandas as pd
import itertools
import sys
import math
import Constant
class Parameter:
USER_NUM_CONST = 40
CAPACITY_CONST = 10
def __init__(self, seed):
np.random.seed(seed)
self.USER_NUM = Parameter.USER_NUM_CONST
self.CAPACITY = Parameter.CAPACITY_CONST
df_server = pd.read_csv("../../network/kanto.csv")
self.SERVER_NUM = len(df_server)
def create_input(self):
self.e_u = list(itertools.product(range(self.USER_NUM), range(self.SERVER_NUM)))
self.e_s = list(itertools.combinations(list(range(0, self.SERVER_NUM)), 2))
self.m_s = np.full(self.SERVER_NUM, self.CAPACITY)
df_server = pd.read_csv("../../network/kanto.csv")
self.d_st = self.get_d_st(df_server)
self.d_us = self.get_d_us(df_server)
def get_d_st(self, df):
d_st = []
for link in self.e_s:
city_1, city_2 = link[0], link[1]
x_1, y_1 = df.iloc[city_1]["latitude"], df.iloc[city_1]["longitude"]
x_2, y_2 = df.iloc[city_2]["latitude"], df.iloc[city_2]["longitude"]
d_st.append(Parameter.get_distance(x_1, y_1, x_2, y_2))
return np.array(d_st)
    @staticmethod
    def get_lower_and_upper(df, col):
        return df[col].min() - 0.3, df[col].max() + 0.3
def get_d_us(self, df_server):
# range
lati_lower, lati_upper = Parameter.get_lower_and_upper(df_server, "latitude")
longi_lower, longi_upper = Parameter.get_lower_and_upper(df_server, "longitude")
# create users location
        # uniform latitudes in [lati_lower, lati_upper]
        lati_array = (lati_upper - lati_lower) * np.random.rand(self.USER_NUM) + lati_lower
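    # Parameter.get_distance is referenced above but not included in this
    # excerpt; a plausible haversine sketch is given below (an assumption --
    # the original may use a different metric or radius).
    @staticmethod
    def get_distance(x_1, y_1, x_2, y_2):
        # great-circle distance in km between two (lat, lon) points in degrees
        R = 6371.0
        p_1, p_2 = math.radians(x_1), math.radians(x_2)
        dp = math.radians(x_2 - x_1)
        dl = math.radians(y_2 - y_1)
        a = math.sin(dp / 2) ** 2 + \
            math.cos(p_1) * math.cos(p_2) * math.sin(dl / 2) ** 2
        return 2 * R * math.asin(math.sqrt(a))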
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 18 12:05:56 2018
@author: fabrizio
"""
import os
import os.path
import numpy as np
import sys
from prody import execDSSP
from Amino import Amino as aminoacido
from Protein import Protein as proteina
from DSSPData import DSSPData as DSSP
from keras.utils import to_categorical
class PdbParser:
def __init__(self):
#default folders with pdbs and (saved) DSSPs:
self.pdbdir='data/cullpdb/pdbs/'
self.dsspdir='data/cullpdb/dssp/'
if not os.path.exists(self.dsspdir):
os.makedirs(self.dsspdir)
def convertSomePDBtoDSSP(self,quantity):
#converts a given number of pdb to DSSP
for root, dirs, files in os.walk(self.pdbdir):
for filename in files:
quantity-=1
if(quantity<0):
break
print(quantity,' )',filename,' converted to dssp')
# here uses from prody import execDSSP
execDSSP(self.pdbdir+filename,outputdir=self.dsspdir)
break
    def convertiSinglePDBtoDSSP(self, file):  # with extension
if(os.path.isfile(self.pdbdir+file)):
execDSSP(self.pdbdir+file,outputdir=self.dsspdir)
else:
print("File "+self.pdbdir+file+" not found..\n")
def getDSSPInfo(self,file):
# for debug
if(os.path.isfile(self.dsspdir+file)):
print('-'*7,'DSSP ',file,'-'*7)
dsspData = DSSP()
dsspData.parseDSSP(self.dsspdir+file)
dsspACC = np.array(dsspData.getACC())
print('ACC: ',dsspACC)
print(dsspACC.shape)
getAAs = np.array(dsspData.getAAs())
print('ACC: ',getAAs)
print(getAAs.shape)
if(dsspACC.shape[0]>700):
return 0
else:
return 1
else:
print('dssp not found..searching in pdbs folder')
self.convertiSinglePDBtoDSSP(file.replace('dssp','pdb'))
if(os.path.isfile(self.dsspdir+file)):
self.getDSSPInfo(file)
else:
print('file not found: ',file)
def getAllDSSPInfo(self,c=8000):
count=0
for root, dirs, files in os.walk(self.dsspdir):
for filename in files:
#count += self.getDSSPInfo(filename)
dssps = self.extractSSfromDSSP(filename)
print([x[1] for x in dssps])
count +=1
if(count>=c):
break
break
print('count: ',count)
def convertDSSPtoSample(self,file):
if(os.path.isfile(self.dsspdir+file)):
#print('-'*7,'CONVERTING DSSP ',file,' TO SAMPLE','-'*7)
primaryArray=['A', 'C', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L', 'N', 'Q', 'P', 'S', 'R', 'T', 'W', 'V', 'Y', 'X']
secondaryArray=['L', 'B', 'E', 'G', 'I', 'H', 'S', 'T']
dssp = self.extractSSfromDSSP(file)
dsspData = DSSP()
dsspData.parseDSSP(self.dsspdir+file)
dsspPrimary = np.array(dsspData.getAAs())
            dsspSecondary = np.array([x[1] for x in dssp])
"""deals with data for project"""
import re
import json
import os
import sys
import math
import random
import tarfile
import numpy as np
import pandas as pd
from PIL import Image
from filecmp import dircmp
from collections import deque
from Reco3D.third_party import binvox_rw
from Reco3D.lib import utils, dataset
from sklearn import model_selection
from keras.utils import to_categorical
from numpy.random import randint, permutation, shuffle
from natsort import natsorted
from skimage.transform import resize
import boto3
def load_obj_id(obj_id):
data_path, label_path = id_to_path(obj_id)
return load_imgs_from_dir(data_path), np.squeeze(load_voxs_from_dir(label_path))
def id_to_path(obj_id, data_dir="./data/ShapeNetRendering/", label_dir="./data/ShapeNetVox32/"):
regex = re.search("(.*)_(.*)", obj_id)
ret_1 = os.path.join(data_dir, regex.group(1), regex.group(2))
ret_2 = os.path.join(label_dir, regex.group(1), regex.group(2))
return ret_1, ret_2
def resize_img(img):
max_size = max(img.size)
ratio = 137/max_size
size = tuple([int(x*ratio) for x in img.size])
img.thumbnail(size,Image.ANTIALIAS)
# loading functions
def load_img(img_path):
img = Image.open(img_path)
    if img.size != (137, 137):  # PIL .size is a (width, height) pair
resize_img(img)
return np.array(img)
def load_vox(vox_path):
with open(vox_path, 'rb') as f:
voxel = binvox_rw.read_as_3d_array(f).data
#if False:
#if np.shape(voxel) != (32, 32, 32):
# voxel = resize(voxel, (32, 32, 32), anti_aliasing=True,anti_aliasing_sigma=0.01)>0
return to_categorical(voxel)
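# Worked note on the encoding above: binvox yields a boolean (32, 32, 32)
# occupancy grid, and to_categorical one-hot encodes it to (32, 32, 32, 2):
#   to_categorical(np.array([0, 1])) -> [[1., 0.], [0., 1.]]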
def load_imgs(img_path_list):
assert(isinstance(img_path_list, (list, np.ndarray)))
ret = []
for p in img_path_list:
ret.append(load_img(p))
return np.stack(ret)
def load_voxs(vox_path_list):
assert(isinstance(vox_path_list, (list, np.ndarray)))
ret = []
for p in vox_path_list:
ret.append(load_vox(p))
return np.stack(ret)
def load_imgs_from_dir(img_dir):
img_path_list = construct_file_path_list_from_dir(img_dir, [".png"])
return load_imgs(img_path_list)
def load_voxs_from_dir(vox_dir):
vox_path_list = construct_file_path_list_from_dir(vox_dir, [".binvox"])
return load_voxs(vox_path_list)
# # dataset loading functions
def load_data(data_samples):
if isinstance(data_samples, str):
data_samples = [data_samples]
return load_imgs(data_samples)
def load_label(label_samples):
if isinstance(label_samples, str):
label_samples = [label_samples]
return np.squeeze(load_voxs(label_samples))
def load_data_Pix3D():
''' Read Pix3D data. This dataset includes real imageas and binvox'''
data_path = utils.read_params()['DIRS']['DATA_PIX3D']
data_all = sorted(construct_file_path_list_from_dir(data_path, ['_x.npy']))
label_all = sorted(construct_file_path_list_from_dir(data_path, ['_y.npy']))
return np.array(data_all), np.array(label_all)
def load_random_data_Pix3D():
data, label = load_data_Pix3D()
while True:
i = np.random.randint(0, len(data))
data_np, label_np = np.load(data[i]), np.load(label[i])
if data_np.shape[-1] == 3:
return data_np, label_np
# load preprocessed data and labels
def load_preprocessed_dataset():
data_preprocessed_dir = utils.read_params(
)["DIRS"]["DATA_PREPROCESSED"]
data_all = sorted(
dataset.construct_file_path_list_from_dir(data_preprocessed_dir, ["_x.npy"]))
label_all = sorted(
dataset.construct_file_path_list_from_dir(data_preprocessed_dir, ["_y.npy"]))
return np.array(data_all), np.array(label_all)
def load_random_sample():
data, label = load_preprocessed_dataset()
i = randint(0, len(data))
return np.load(data[i]), np.load(label[i])
def load_testset(model_dir):
try:
X_test = np.load(
"{}/X_test.npy".format(model_dir))
y_test = np.load(
"{}/y_test.npy".format(model_dir))
except:
model_dir = os.path.dirname(model_dir)
X_test = np.load(
"{}/X_test.npy".format(model_dir))
y_test = np.load(
"{}/y_test.npy".format(model_dir))
return X_test, y_test
def shuffle_batchs(data, label, batch_size):
# print(data, label, batch_size)
assert(len(data) == len(label))
num_of_batches = math.ceil(len(data)/batch_size)
perm = permutation(len(data))
data_batchs = np.array_split(data[perm], num_of_batches)
label_batchs = np.array_split(label[perm], num_of_batches)
return deque(data_batchs), deque(label_batchs)
def train_val_test_split(data, label, split=0.1):
# split into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
data, label, test_size=split) # shuffled
# split of validation set
X_train, X_val, y_train, y_val = model_selection.train_test_split(
X_train, y_train, test_size=split) # shuffled
return X_train, y_train, X_val, y_val, X_test, y_test
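# Worked split arithmetic for the default split=0.1 applied twice:
#   test = 10%, val = 0.1 * 90% = 9%, train = 0.9 * 90% = 81% of the data.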
def setup_dir():
params = utils.read_params()
DIR = params["DIRS"]
for d in DIR.values():
if 'ikea' in d: continue
utils.make_dir(d)
def construct_file_path_list_from_dir(dir, file_filter):
if isinstance(file_filter, str):
file_filter = [file_filter]
paths = [[] for _ in range(len(file_filter))]
for root, _, files in os.walk(dir):
for f_name in files:
for i, f_substr in enumerate(file_filter):
if f_substr in f_name:
(paths[i]).append(root + '/' + f_name)
for i, p in enumerate(paths):
paths[i] = natsorted(p)
if len(file_filter) == 1:
return paths[0]
return tuple(paths)
def create_path_csv(data_dir, label_dir):
print("creating path csv for {} and {}".format(data_dir, label_dir))
params = utils.read_params()
common_paths = []
for dir_top, subdir_cmps in dircmp(data_dir, label_dir).subdirs.items():
for dir_bot in subdir_cmps.common_dirs:
common_paths.append(os.path.join(dir_top, dir_bot))
mapping = pd.DataFrame(common_paths, columns=["common_dirs"])
mapping['data_dirs'] = mapping.apply(
lambda data_row: os.path.join(data_dir, data_row.common_dirs), axis=1)
mapping['label_dirs'] = mapping.apply(
lambda data_row: os.path.join(label_dir, data_row.common_dirs), axis=1)
table = []
for n, d, l in zip(common_paths, mapping.data_dirs, mapping.label_dirs):
data_row = [os.path.dirname(n)+"_"+os.path.basename(n)]
data_row += construct_file_path_list_from_dir(d, [".png"])
data_row += construct_file_path_list_from_dir(l, [".binvox"])
if not construct_file_path_list_from_dir(l, [".binvox"]):
continue
table.append(data_row)
paths = pd.DataFrame(table)
paths.to_csv("{}/paths.csv".format(params["DIRS"]["OUTPUT"]))
return paths
def download_from_link(link):
download_folder = os.path.splitext(os.path.basename(link))[0]
archive = download_folder + ".tgz"
if not os.path.isfile(archive):
os.system('wget -c {0}'.format(link))
os.system("tar -xvzf {0}".format(archive))
os.rename(download_folder, "data/{}".format(download_folder))
# os.system("rm -f {0}".format(archive))
def download_dataset():
LABEL_LINK = 'ftp://cs.stanford.edu/cs/cvgl/ShapeNetVox32.tgz'
DATA_LINK = "ftp://cs.stanford.edu/cs/cvgl/ShapeNetRendering.tgz"
if not os.path.isdir("data/ShapeNetVox32"):
download_from_link(LABEL_LINK)
if not os.path.isdir("data/ShapeNetRendering"):
download_from_link(DATA_LINK)
# download data from s3 bucket
def download_from_s3_folder(s3_bucket='shapenetv1'):
#s3_bucket_name = 'shapenetv1'
#s3 = boto3.resource('s3')
#print ("Downloading the data {} from s3 to {}".format("shapenetv1.tar", "./data"))
#s3.meta.client.download_file(s3_bucket, 'data/shapenetv1.tar', './data/shapenetv1.tar')
LINK = 'https://shapenetv1.s3-us-west-2.amazonaws.com/data/shapenetv1.tar'
os.system('wget -c {0} -P ./data'.format(LINK))
def prepare_dataset():
archive = 'data/shapenetv1.tar'
if not os.path.isfile(archive) and not os.path.isdir("data/ShapeNetVox32"):
download_from_s3_folder()
if not os.path.isdir("data/ShapeNetVox32") or not os.path.isdir('data/ShapeNetRendering') :
os.system("tar -xvzf {0} -C ./data/".format(archive))
def preprocess_dataset(is_high_res=False):
params = utils.read_params()
dataset_size = params["DATASET_SIZE"]
output_dir = params["DIRS"]["OUTPUT"]
data_preprocessed_dir = params["DIRS"]["DATA_PREPROCESSED"]
data_dir = params["DIRS"]["DATA"]
if not os.path.isfile("{}/paths.csv".format(output_dir)):
if is_high_res:
dataset.create_path_csv(
"{}/ShapeNetRendering".format(data_dir), "{}/ShapeNetVox64".format(data_dir))
else:
dataset.create_path_csv(
"{}/ShapeNetRendering".format(data_dir), "{}/ShapeNetVox32".format(data_dir))
    path_list = pd.read_csv(
        "{}/paths.csv".format(output_dir), index_col=0).to_numpy()
# randomly pick examples from dataset
shuffle(path_list)
if dataset_size <= 0 or dataset_size >= len(path_list):
dataset_size = len(path_list)
for i in range(dataset_size):
model_name = path_list[i, 0]
utils.to_npy('{}/{}_x'.format(data_preprocessed_dir, model_name),
load_data(path_list[i, 1:-1]))
utils.to_npy('{}/{}_y'.format(data_preprocessed_dir, model_name),
load_label(path_list[i, -1]))
def render_dataset(dataset_dir="ShapeNet", num_of_examples=None, render_count=24):
print("[load_dataset] loading from {0}".format(dataset_dir))
pathlist_tuple = construct_file_path_list_from_dir(
dataset_dir, ['.obj', '.mtl'])
pathlist = pathlist_tuple[0] # DANGER, RANDOM
pathlist = pathlist[:num_of_examples] if num_of_examples is not None else pathlist
render_list = []
for mesh_path in pathlist:
if not os.path.isfile(mesh_path):
continue
try:
mesh_obj = trimesh.load_mesh(mesh_path)
except:
print("failed to load {}".format(mesh_path))
continue
if isinstance(mesh_obj, list):
compund_mesh = mesh_obj.pop(0)
for m in mesh_obj:
compund_mesh += m
else:
compund_mesh = mesh_obj
render_dir = "./ShapeNet_Renders"
renders = os.path.dirname(
str.replace(mesh_path, dataset_dir, render_dir))
if os.path.isdir(renders) and os.listdir(renders) != []:
render_list.append(load_imgs_from_dir(renders))
else:
write_renders_to_disk(compund_mesh, renders, render_count)
render_list.append(load_imgs_from_dir(renders))
return render_list
def write_renders_to_disk(mesh, renders, render_count=10):
print("[write_renders_to_disk] writing renders to {0} ... ".format(
renders))
# FIXME: stupid but clean
os.system("rm -rf {}".format(renders))
utils.make_dir(renders)
scene = mesh.scene()
for i in range(render_count):
angle = math.radians(random.randint(15, 30))
axis = random.choice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
rotate = trimesh.transformations.rotation_matrix(
angle, axis, scene.centroid)
camera_old, _geometry = scene.graph['camera']
        camera_new = np.dot(camera_old, rotate)
## Copyright 2020 <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
###########################################################################
###########################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
class Validation:
def __init__(self,config):
self.config = config
self.cut_off = config["cut_off"]
self.data = {}
self.result_dir = config["result_dir"]
def validate(self,model,data):
# model.to(device)
criterion_bce = nn.BCELoss()
# criterion = nn.BCEWithLogitsLoss()
criterion_mse = nn.MSELoss()
model.eval()
correct = 0
# false_safe = 0
under_approx = 0
over_approx = 0
total = 0
metric_mse = []
metric_bce = []
for i in range(data.n_all_batches):
state, safe = data.giveBatch(i)
safe_model = model(state).view(-1)
safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)
metric_mse.append(criterion_mse(safe_model, safe).item())
metric_bce.append(criterion_bce(safe_model, safe).item())
total += safe.size(0)
correct += (safe_model_max == safe).sum().item()
under_approx += (safe_model_max < safe).sum().item()
over_approx += (safe_model_max > safe).sum().item()
print('\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'
% (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))
self.data['full_set'] = []
self.data['full_set'].append({
'acc': correct / total,
'under': under_approx / total,
'over': over_approx / total,
'total': total,
'correct': correct,
'mse': np.mean(metric_mse),
'bce': np.mean(metric_bce)
})
def validateTest(self,model,data):
criterion_bce = nn.BCELoss()
criterion_mse = nn.MSELoss()
model.eval()
correct = 0
# false_safe = 0
under_approx = 0
over_approx = 0
total = 0
metric_mse = []
metric_bce = []
for i in range(data.n_train_batches,data.n_all_batches):
state, safe = data.giveBatch(i)
safe_model = model(state).view(-1)
safe_model_max = (safe_model >= self.cut_off).type(torch.FloatTensor)
metric_mse.append(criterion_mse(safe_model, safe).item())
metric_bce.append(criterion_bce(safe_model, safe).item())
total += safe.size(0)
correct += (safe_model_max == safe).sum().item()
under_approx += (safe_model_max < safe).sum().item()
over_approx += (safe_model_max > safe).sum().item()
        print('\tMSE: %.4f, BCE: %.4f, Acc: %.4f, UnderApprox: %.4f, OverApprox: %.4f'
              % (np.mean(metric_mse), np.mean(metric_bce), correct / total, under_approx / total, over_approx / total))
import os
import folium
# Importing explicitly the module from 'folium' - 'folium.plugins'
import folium.plugins as plugins
''' *************************************** Generating Folium Base Map **************************************'''
# Generating a 'Leaflet' map for the location in interest by passing through the coordinates
# Calling the 'folium.folium.Map' object
Site_Coord = [4.145825, 108.3035]
m_folium = folium.Map(location = Site_Coord,
zoom_start = 5)
''' *************************************** Adding Minimap onto Folium Base Map **************************************'''
# Activating the 'folium.plugins' to include a minimap at the bottomright of the main map
m_minimap_Batu_Kawan = plugins.MiniMap(toggle_display = True,
width=200,
height=200,
zoom_level_fixed=None,
minimized=True)
m_folium.add_child(m_minimap_Batu_Kawan)
''' ***************************** Extracting G&P Geotechnics Project with Coordinates *******************************'''
import pyexcel as pyex
import numpy as np
DATA = pyex.get_book(file_name = '2020_Gallery.xlsx')
''' *********************************** 2008-2018 Projects **************************************************'''
# The original script repeated an identical extraction/marker block once per
# year; the blocks are collapsed into one loop. Marker colours follow the
# per-year choices in the source: 2008-2010 black, 2011-2015 orange,
# 2016-2018 red (the 2018 block was truncated in the source, so its colour is
# assumed to follow the 2016-2017 convention).
YEAR_COLOURS = {}
YEAR_COLOURS.update({year: 'black' for year in range(2008, 2011)})
YEAR_COLOURS.update({year: 'orange' for year in range(2011, 2016)})
YEAR_COLOURS.update({year: 'red' for year in range(2016, 2019)})
for year, colour in sorted(YEAR_COLOURS.items()):
    # Data extraction for projects secured in the given year: column 1 holds
    # the latitudes, column 0 the longitudes and column 3 the project labels
    DATA_YEAR = np.array(DATA.sheet_by_name(str(year)))
    lat = np.ndarray.tolist(DATA_YEAR[1:, 1])
    lgn = np.ndarray.tolist(DATA_YEAR[1:, 0])
    pgn = np.ndarray.tolist(DATA_YEAR[1:, 3])
    # Calling the class folium.map.FeatureGroup to group the places of interest in the LayerControl panel
    feature_group = folium.FeatureGroup("{} Projects".format(year))
    for (lat_tooltip, long_tooltip, m_tooltip_label) in zip(lat, lgn, pgn):
        tooltip_Coord = [lat_tooltip, long_tooltip]
        feature_group.add_child(folium.Marker(location=tooltip_Coord,
                                              icon=folium.Icon(color=colour, icon='info-sign'),
                                              popup=folium.Popup(m_tooltip_label, max_width=200, min_width=200)))
    m_folium.add_child(feature_group)
'''
Layers for NN-models (forward+backward pass).
Written by <NAME> (https://github.com/SLotAbr).
BSD License
'''
# from multiprocessing import Process
import numpy as np
import pickle
from tools.functions import string_softmax
from tools.optimizers import AdaM as AdaM
class token_embedding:
def __init__(self, vocabulary_size, d_model, context_size, optim_param):
self.TE_table = np.random.randn(vocabulary_size, d_model) * 1e-3
self.vocabulary_size = vocabulary_size
self.d_model = d_model
self.context_size = context_size
self.input_field = 0
self.optim = AdaM(optim_param)
def __call__(self, index_list):
# form X matrix from tokens indexes
# We should use 2D array for further concatenation
self.input_indexes = index_list
context =[[self.TE_table[j] for j in index_list]]
return np.concatenate(context, axis=1)
def update_weights(self, dX, dTE_linear):
# dTE_linear - the second part of TE derivative
# TE derivative have 2 parts - so, we'll get it by external source
dTE = np.zeros((self.vocabulary_size, self.d_model))
for i in range(self.context_size):
dTE[self.input_indexes[i]]+= dX[i]
dTE += dTE_linear
self.TE_table = self.optim.weights_update(self.TE_table, dTE)
def linear(self, x):
'''
using token_embeddings as linear layer with bias=0
we'll use it for finding out output token probabilities
:x.shape = [context_size; d_model]
:output.shape = [context_size; vocabulary_size]
'''
self.input_field = x
return [email protected]_table.T
def linear_backward(self, dl):
# returns derivatives for input signal and TE_table
return [email protected]_table, (self.input_field.T@dl).T
def save_weights(self, path):
with open(path, 'wb') as f:
pickle.dump([self.TE_table, self.optim], f)
def restore_weights(self, path):
with open(path, 'rb') as f:
self.TE_table, self.optim = pickle.load(f)
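# Usage sketch (hypothetical sizes and optimizer settings): the same TE_table
# acts as the input embedding and, through linear()/linear_backward(), as the
# tied output projection from d_model features to vocabulary logits.
#
#     emb = token_embedding(vocabulary_size=50, d_model=16, context_size=4,
#                           optim_param=optim_param)
#     X = emb([3, 1, 4, 1])      # (4, 16) context matrix
#     logits = emb.linear(X)     # (4, 50) scores per vocabulary token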
class linear:
def __init__(self, hidden_units, number_of_neurons, optim_param):
# mean = 0, var = 1
self.W = np.random.randn(hidden_units, number_of_neurons) * 1e-3
self.b = np.zeros(number_of_neurons)
self.input_field = 0 # Memory for backpropagation
self.w_optim = AdaM(optim_param)
self.b_optim = AdaM(optim_param)
def __call__(self, x):
self.input_field = x
return (x @ self.W + self.b) #np.dot(x, w) + b
def backward(self, dl):
dw = self.input_field.T @ dl
db = dl.sum(axis=0)
# Updating weights
self.W = self.w_optim.weights_update(self.W, dw)
self.b = self.b_optim.weights_update(self.b, db)
# returns dl for previous layers
return dl @ self.W.T
def save_weights(self, path):
with open(path, 'wb') as f:
pickle.dump([self.W,
self.b,
self.w_optim,
self.b_optim], f)
def restore_weights(self, path):
with open(path, 'rb') as f:
self.W, self.b, self.w_optim, self.b_optim = pickle.load(f)
class ReLU:
def __call__(self, x):
result = np.maximum(0, x)
self.mask = result>0
return result
def backward(self, dl):
return dl * self.mask
class LayerNormalization:
def __init__(self, context_size):
self.context_size = context_size
def __call__(self, x, phase='train'):
'''
I'll delete if-else construction and replace it more
eficient version for evaluation phase later.
There is the same construction in MH_attention_mechanism (__call__ field)
'''
if phase == 'train':
context_size = self.context_size
else:
context_size = x.shape[0]
x_mean = (x.mean(axis=1).reshape(1,context_size)).T
self.x_var = (x.var(axis=1).reshape(1,context_size)).T
return (x-x_mean)/np.sqrt(self.x_var+1e-12)
def backward(self, dl):
l_mean = (dl.mean(axis=1).reshape(1,self.context_size)).T
return (dl - l_mean)/np.sqrt(self.x_var+1e-12)
class MH_attention_mechanism:
def __init__(self, context_size, d_model, H):
self.d_k = 1/np.sqrt(d_model/H)
self.context_size = context_size
self.H = H
# matrix with 'True' values above the main diagonal
# We'll use it for replacing elements in dot product of Q and K
self.mask=(np.tril(np.ones((context_size, context_size)))==0)
self.backward_mask=np.tril(np.ones((context_size, context_size)))
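    # Worked example of the causal mask for context_size = 3:
    #   np.tril(np.ones((3, 3))) == 0  ->  [[False,  True,  True],
    #                                       [False, False,  True],
    #                                       [False, False, False]]
    # Entries above the diagonal are True and are filled with -1e12 before
    # the softmax, so each token attends only to itself and earlier tokens.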
def __call__(self, x, phase='train'):
self.Q, self.K, self.V = np.split(x, 3, axis=1)
self.Q = np.split(self.Q, self.H, axis=1)
self.K = np.split(self.K, self.H, axis=1)
self.V = np.split(self.V, self.H, axis=1)
# When we generate text ('eval phase'), context_size always different
if phase == 'train':
context_size = self.context_size
else:
context_size = x.shape[0]
# Replace it by pre-init fields for faster implementation?
C = [0 for h in range(self.H)]
self.S = [0 for h in range(self.H)]
Z = [0 for h in range(self.H)]
# https://docs.python.org/3/library/multiprocessing.html
for h in range(self.H):
# Attention formula
C[h] = self.Q[h] @ self.K[h].T * self.d_k
if phase == 'train':
C[h][self.mask]=-1e12
else:
# We've got different context_size during evaluation
mask = (np.tril(np.ones((context_size, context_size)))==0)
C[h][mask]=-1e12
self.S[h] = string_softmax(C[h], context_size)
# print('softmax\'s state:\n', self.S[h])
Z[h] = self.S[h]@self.V[h]
# print('Z\'s state:\n', Z[h])
return np.concatenate(Z, axis=1)
def backward(self, dl):
        dZ = np.split(dl, self.H, axis=1)
################################################################################
# UNIVERSIDADE FEDERAL DE CATALÃO (UFCAT)
# <NAME>, CIVIL ENGINEER / PROFESSOR (UFCAT)
# <NAME>, CIVIL ENGINEER / PROFESSOR (UFCAT)
# <NAME>, CIVIL ENGINEER (UFCAT)
################################################################################
################################################################################
# ALGORITHM DESCRIPTION:
# DESIGN LIBRARY FOR PRECAST AND PRESTRESSED BEAMS, DEVELOPED BY THE
# ENGINEERING RESEARCH AND STUDY GROUP (GPEE)
################################################################################
################################################################################
# NATIVE PYTHON LIBRARIES
import numpy as np
################################################################################
# LIBRARIES DEVELOPED BY GPEE
def PROP_GEOMETRICA_I(H, B_FS, B_FI, B_W, H_S, H_I, H_SI, H_II):
"""
Esta função determina as propriedades geométricas de uma seção I.
Entrada:
H | Altura da viga | m | float
B_FS | Base de mesa superior da viga | m | float
B_FI | Base de mesa inferior da viga | m | float
B_W | Base de alma da viga | m | float
H_S | Altura de mesa superior da viga | m | float
H_I | Altura de mesa inferior da viga | m | float
H_SI | Altura inclinada de mesa superior da viga | m | float
H_II | Altura inclinada de mesa inferior da viga | m | float
Saída:
A_C | Área da seção transversal da viga | m² | float
I_C | Inércia da viga | m^4 | float
Y_SUP | Ordenada da fibra superior | m | float
Y_INF | Ordenada da fibra inferior | m | float
W_SUP | Modulo de resistência superior | m³ | float
W_INF | Modulo de resistência inferior | m³ | float
"""
A_1 = B_W * H
A_2 = (B_FS - B_W) * H_S
A_3 = ((B_FS - B_W) * H_SI) / 2
A_4 = (B_FI - B_W) * H_I
A_5 = ((B_FI - B_W) * H_I)/2
A_C = A_1 + A_2 + A_3 + A_4 + A_5
Y_CG = (A_1 * (H/2) + A_2 * (H - H_S / 2) + A_3 * (H - H_S - H_SI / 2) + A_4 * (H_I / 2) + A_5 * (H_I + H_II)) / A_C
I_1 = (B_W * H**3) / 12 + A_1 * (H / 2 - Y_CG)**2
I_2 = ((B_FS - B_W)* H_S**3) / 12 + A_2 * (H - H_S/2 - Y_CG)**2
I_3 = ((B_FS - B_W)* H_SI**3) / 36 + A_3 * (H - H_S - H_SI / 3 - Y_CG)**2
I_4 = ((B_FI - B_W)* H_I**3) / 12 + A_4 * (Y_CG - H_I / 2)**2
I_5 = ((B_FI - B_W)* H_II**3) / 36 + A_5 * (Y_CG - H_I - H_II / 3)**2
I_C = I_1 + I_2 + I_3 + I_4 + I_5
Y_SUP = H - Y_CG
Y_INF = Y_CG
W_SUP = I_C / Y_SUP
W_INF = I_C / Y_INF
return A_C, I_C, Y_SUP, Y_INF, W_SUP, W_INF
def PROP_GEOMETRICA_RET(B_W, H):
"""
Esta função determina as propriedades geométricas de uma seção retangular.
Entrada:
B_W | Largura da viga | m | float
H | Altura da viga | m | float
Saída:
A_C | Área da seção transversal da viga | m² | float
I_C | Inércia da viga | m^4 | float
Y_SUP | Ordenada da fibra superior | m | float
Y_INF | Ordenada da fibra inferior | m | float
W_SUP | Modulo de resistência superior | m³ | float
W_INF | Modulo de resistência inferior | m³ | float
"""
A_C = B_W * H
I_C = (B_W * H ** 3) / 12
Y_SUP = H / 2
Y_INF = H / 2
W_SUP = I_C / Y_SUP
W_INF = I_C / Y_INF
return A_C, I_C, Y_SUP, Y_INF, W_SUP, W_INF
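# Worked check of PROP_GEOMETRICA_RET for a hypothetical 0.20 m x 0.50 m beam:
#   A_C = 0.20 * 0.50 = 0.10 m²
#   I_C = 0.20 * 0.50**3 / 12 ~= 2.083E-3 m^4
#   W_SUP = W_INF = I_C / 0.25 ~= 8.333E-3 m³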
def FATOR_BETA1(TEMPO, CIMENTO):
"""
Esta função calcula o valor de BETA_1 que representa a função de
crescimento da resistência do cimento.
Entrada:
TEMPO | Tempo | dias | float
CIMENTO | Cimento utilizado | | string
| 'CP1' - Cimento portland 1 | |
| 'CP2' - Cimento portland 2 | |
| 'CP3' - Cimento portland 3 | |
| 'CP4' - Cimento portland 4 | |
| 'CP5' - Cimento portland 5 | |
Saída:
BETA_1 | Parâmetro de crescimento da resistência | | float
"""
if TEMPO < 28 :
if CIMENTO == 'CP1' or CIMENTO == 'CP2':
S = 0.25
elif CIMENTO == 'CP3' or CIMENTO == 'CP4':
S = 0.38
elif CIMENTO == 'CP5':
S = 0.20
BETA_1 = np.exp(S * (1 - (28 / TEMPO) ** 0.50))
else :
BETA_1 = 1
return BETA_1
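# Example (illustrative): strength-growth factor for CP5 cement at 7 days.
# FATOR_BETA1(7, 'CP5') = exp(0.20 * (1 - (28 / 7) ** 0.5)) ≈ 0.819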
def MODULO_ELASTICIDADE_CONCRETO(AGREGADO, F_CK, F_CKJ):
"""
Esta função calcula os módulos de elasticidade do concreto.
Entrada:
AGREGADO | Tipo de agragado usado no traço do cimento | | string
| 'BAS' - Agregado de Basalto | |
| 'GRA' - Agregado de Granito | |
| 'CAL' - Agregado de Calcário | |
| 'ARE' - Agregado de Arenito | |
F_CK | Resistência característica à compressão | kN/m² | float
F_CKJ | Resistência característica à compressão idade J | kN/m² | float
Saída:
E_CIJ | Módulo de elasticidade tangente | kN/m² | float
E_CSJ | Módulo de elasticidade do secante | kN/m² | float
"""
    # Tangent modulus E_CI at age T
if AGREGADO == 'BAS':
ALFA_E = 1.2
elif AGREGADO == 'GRA':
ALFA_E = 1.0
elif AGREGADO == 'CAL':
ALFA_E = 0.9
elif AGREGADO == 'ARE':
ALFA_E = 0.7
F_CK /= 1E3
if F_CK <= 50:
E_CI = ALFA_E * 5600 * np.sqrt(F_CK)
elif F_CK > 50:
E_CI = 21.5 * (10 ** 3) * ALFA_E * (F_CK / 10 + 1.25) ** (1 / 3)
ALFA_I = 0.8 + 0.2 * F_CK / 80
if ALFA_I > 1:
ALFA_I = 1
    # Secant modulus E_CS at age T
E_CS = E_CI * ALFA_I
if F_CK <= 45 :
F_CK *= 1E3
E_CIJ = E_CI * (F_CKJ / F_CK) ** 0.5
elif F_CK > 45 :
F_CK *= 1E3
E_CIJ = E_CI * (F_CKJ / F_CK) ** 0.3
E_CSJ = E_CIJ * ALFA_I
E_CIJ *= 1E3
E_CSJ *= 1E3
return E_CIJ, E_CSJ
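# Example (illustrative): 28-day moduli for a 30 MPa (3.0E4 kN/m²) concrete
# with granite aggregate (ALFA_E = 1.0): E_CI = 5600 * sqrt(30) ≈ 30672 MPa,
# ALFA_I = 0.8 + 0.2 * 30 / 80 = 0.875, hence E_CS ≈ 26838 MPa.
# E_CIJ, E_CSJ = MODULO_ELASTICIDADE_CONCRETO('GRA', 30E3, 30E3)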
def PROP_MATERIAL(F_CK, TEMPO, CIMENTO, AGREGADO):
"""
Esta função determina propriedades do concreto em uma idade TEMPO.
Entrada:
F_CK | Resistência característica à compressão | kN/m² | float
TEMPO | Tempo | dias | float
CIMENTO | Cimento utilizado | | string
| 'CP1' - Cimento portland 1 | |
| 'CP2' - Cimento portland 2 | |
| 'CP3' - Cimento portland 3 | |
| 'CP4' - Cimento portland 4 | |
| 'CP5' - Cimento portland 5 | |
AGREGADO | Tipo de agragado usado no traço do cimento | | string
| 'BAS' - Agregado de Basalto | |
| 'GRA' - Agregado de Granito | |
| 'CAL' - Agregado de Calcário | |
| 'ARE' - Agregado de Arenito | |
Saída:
F_CKJ | Resistência característica à compressão idade J | kN/m² | float
F_CTMJ | Resistência média caracteristica a tração idade J | kN/m² | float
F_CTKINFJ | Resistência média caracteristica a tração inf idade J | kN/m² | float
F_CTKSUPJ | Resistência média caracteristica a tração sup idade J | kN/m² | float
E_CIJ | Módulo de elasticidade tangente | kN/m² | float
E_CSJ | Módulo de elasticidade do secante | kN/m² | float
"""
    # Compressive properties F_C at age TEMPO (days)
BETA_1 = FATOR_BETA1(TEMPO, CIMENTO)
F_CKJ = F_CK * BETA_1
F_CKJ /= 1E3
F_CK /= 1E3
if F_CKJ < 21 :
F_CKJ = 21
    # Tensile properties F_CT at age TEMPO (days)
if F_CK <= 50:
F_CTMJ = 0.3 * F_CKJ ** (2/3)
elif F_CK > 50:
        F_CTMJ = 2.12 * np.log(1 + 0.11 * F_CKJ)
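    # NOTE: assumed completion -- the original source is truncated at this
    # point. The relations below follow NBR 6118 (f_ctk,inf = 0.7 f_ct,m and
    # f_ctk,sup = 1.3 f_ct,m); the unit conversions restore kN/m².
    F_CTKINFJ = 0.7 * F_CTMJ
    F_CTKSUPJ = 1.3 * F_CTMJ
    F_CK *= 1E3
    F_CKJ *= 1E3
    F_CTMJ *= 1E3
    F_CTKINFJ *= 1E3
    F_CTKSUPJ *= 1E3
    E_CIJ, E_CSJ = MODULO_ELASTICIDADE_CONCRETO(AGREGADO, F_CK, F_CKJ)
    return F_CKJ, F_CTMJ, F_CTKINFJ, F_CTKSUPJ, E_CIJ, E_CSJ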
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
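            # Assumed completion (the original source is truncated here):
            # mirror the remaining test_float checks for double precision.
            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])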
#! /usr/bin/env python
# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import os
import sys
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
import numpy as np
import scipy.constants as scc
## This script performs various checks for the proton-boron nuclear fusion module. The simulation
## that we check is made of 5 different tests, each with different proton, boron and alpha species.
##
## The first test is performed in the proton-boron center of mass frame. It could correspond to the
## physical case of a proton beam colliding with a boron beam. The kinetic energy of the colliding
## particles depends on the cell number in the z direction and varies in the few keV to few MeV
## range. All the particles within a cell have the exact same momentum, which allows detailed
## checks of the energy of produced alpha particles. The proton and boron species have the same
## density and number of particles in this test. The number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The second test is performed in the boron rest frame. It corresponds to the physical case of a
## low density proton beam colliding with a high-density proton+boron target. The energy of the
## proton beam is varied in the few keV to few MeV range, depending on the cell number in the z
## direction. As in the previous case, all the particles within a cell have the exact same
## momentum, which allows detailed checks of the energy of produced alpha particles. In this test,
## there are 100 immobile boron and 100 immobile proton macroparticles per cell, as well as 900
## beam proton macroparticles per cell. The density of the immobile particles is 6 orders of
## magnitude higher than the number of beam particles, which means that they have a much higher
## weight. This test is similar to the example given in section 3 of Higginson et al.,
## Journal of Computational Physics, 388, 439–453 (2019), which was found to be sensitive to the way
## unsampled pairs are accounted for. As before, the number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The third test corresponds to a Maxwellian plasma with a 44 keV temperature. The alpha yield is
## directly compared to the analytical fits of <NAME> and <NAME>, Nuclear Fusion, 40, 865
## (2000) for a thermal plasma.
##
## The fourth test corresponds to a plasma with an extremely small boron density, so that all boron
## macroparticles should have disappeared by the end of the simulation, which we verify.
##
## The fifth test is exactly the same as the fourth test, except that the
## fusion_probability_threshold parameter is increased to an excessive value. Because of that, we
## severely underestimate the fusion yield and boron macroparticles remain at the end of the
## simulation, which we verify.
##
## In all simulations, we check particle number, charge, momentum and energy conservation and
## perform basic checks regarding the produced particles. When possible, we also compare the number
## of produced macroparticles, fusion yield and energy of the produced particles to theoretical
## values.
##
## Please be aware that the relative tolerances are often set empirically in this analysis script,
## so it would not be surprising that some tolerances need to be increased in the future.
default_tol = 1.e-12 # Default relative tolerance
## Some physical parameters
keV_to_Joule = scc.e*1e3
MeV_to_Joule = scc.e*1e6
barn_to_square_meter = 1.e-28
m_p = scc.m_p # Proton mass
m_b = 10.9298*m_p # Boron 11 mass
m_reduced = m_p*m_b/(m_p+m_b)
m_a = 3.97369*m_p # Alpha mass
m_be = 7.94748*m_p # Beryllium 8 mass
Z_boron = 5.
Z_proton = 1.
E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2
E_Gamow_MeV = E_Gamow/MeV_to_Joule
E_Gamow_keV = E_Gamow/keV_to_Joule
E_fusion = 8.59009*MeV_to_Joule # Energy released during p + B -> alpha + Be
E_decay = 0.0918984*MeV_to_Joule # Energy released during Be -> 2*alpha
E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha
## Some numerical parameters for this test
size_x = 8
size_y = 8
size_z = 16
dV_total = size_x*size_y*size_z # Total simulation volume
# Volume of a slice corresponding to a single cell in the z direction. In tests 1 and 2, all the
# particles of a given species in the same slice have the exact same momentum
dV_slice = size_x*size_y
dt = 1./(scc.c*np.sqrt(3.))
# In tests 1 and 2, the energy in cell number i (in the z direction) is typically Energy_step * i**2
Energy_step = 22.*keV_to_Joule
def is_close(val1, val2, rtol=default_tol, atol=0.):
## Wrapper around numpy.isclose, used to override the default tolerances.
return np.isclose(val1, val2, rtol=rtol, atol=atol)
def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = yt_ad[species_name, "particle_momentum_x"].v
data_dict[prefix+"_py_"+suffix] = yt_ad[species_name, "particle_momentum_y"].v
data_dict[prefix+"_pz_"+suffix] = yt_ad[species_name, "particle_momentum_z"].v
data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v
data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v
data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v
data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, "particle_position_z"].v
def add_empty_species_to_dict(data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = np.empty(0)
data_dict[prefix+"_py_"+suffix] = np.empty(0)
data_dict[prefix+"_pz_"+suffix] = np.empty(0)
data_dict[prefix+"_w_"+suffix] = np.empty(0)
data_dict[prefix+"_id_"+suffix] = np.empty(0)
data_dict[prefix+"_cpu_"+suffix] = np.empty(0)
data_dict[prefix+"_z_"+suffix] = np.empty(0)
def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
try:
## If species exist, we add its data to the dictionary
add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix)
except yt.utilities.exceptions.YTFieldNotFound:
        ## If the species does not exist, we avoid a Python crash and add empty arrays to the
        ## dictionary. Currently, this happens for the boron species in test number 4, which
        ## entirely fuses into alphas.
add_empty_species_to_dict(data_dict, species_name, prefix, suffix)
def check_particle_number_conservation(data):
total_w_proton_start = np.sum(data["proton_w_start"])
total_w_proton_end = np.sum(data["proton_w_end"])
total_w_boron_start = np.sum(data["boron_w_start"])
total_w_boron_end = np.sum(data["boron_w_end"])
consumed_proton = total_w_proton_start - total_w_proton_end
consumed_boron = total_w_boron_start - total_w_boron_end
created_alpha = np.sum(data["alpha_w_end"])
assert(consumed_proton >= 0.)
assert(consumed_boron >= 0.)
assert(created_alpha >= 0.)
## Check that number of consumed proton and consumed boron are equal
assert_scale = max(total_w_proton_start, total_w_boron_start)
assert(is_close(consumed_proton, consumed_boron, rtol = 0., atol = default_tol*assert_scale))
## Check that number of consumed particles corresponds to number of produced alpha
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
assert(is_close(total_w_proton_start, total_w_proton_end + created_alpha/3.))
assert(is_close(total_w_boron_start, total_w_boron_end + created_alpha/3.))
def compute_energy_array(data, species_name, suffix, m):
## Relativistic computation of kinetic energy for a given species
psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \
data[species_name+'_pz_'+suffix]**2
rest_energy = m*scc.c**2
return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy
def check_energy_conservation(data):
proton_energy_start = compute_energy_array(data, "proton", "start", m_p)
proton_energy_end = compute_energy_array(data, "proton", "end", m_p)
boron_energy_start = compute_energy_array(data, "boron", "start", m_b)
boron_energy_end = compute_energy_array(data, "boron", "end", m_b)
alpha_energy_end = compute_energy_array(data, "alpha", "end", m_a)
total_energy_start = np.sum(proton_energy_start*data["proton_w_start"]) + \
np.sum(boron_energy_start*data["boron_w_start"])
total_energy_end = np.sum(proton_energy_end*data["proton_w_end"]) + \
np.sum(boron_energy_end*data["boron_w_end"]) + \
np.sum(alpha_energy_end*data["alpha_w_end"])
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
n_fusion_reaction = np.sum(data["alpha_w_end"])/3.
assert(is_close(total_energy_end,
total_energy_start + n_fusion_reaction*E_fusion_total,
rtol = 1.e-8))
def check_momentum_conservation(data):
proton_total_px_start = np.sum(data["proton_px_start"]*data["proton_w_start"])
proton_total_py_start = np.sum(data["proton_py_start"]*data["proton_w_start"])
proton_total_pz_start = np.sum(data["proton_pz_start"]*data["proton_w_start"])
proton_total_px_end = np.sum(data["proton_px_end"]*data["proton_w_end"])
proton_total_py_end = np.sum(data["proton_py_end"]*data["proton_w_end"])
proton_total_pz_end = np.sum(data["proton_pz_end"]*data["proton_w_end"])
boron_total_px_start = np.sum(data["boron_px_start"]*data["boron_w_start"])
boron_total_py_start = np.sum(data["boron_py_start"]*data["boron_w_start"])
boron_total_pz_start = np.sum(data["boron_pz_start"]*data["boron_w_start"])
boron_total_px_end = np.sum(data["boron_px_end"]*data["boron_w_end"])
boron_total_py_end = np.sum(data["boron_py_end"]*data["boron_w_end"])
boron_total_pz_end = np.sum(data["boron_pz_end"]*data["boron_w_end"])
alpha_total_px_end = np.sum(data["alpha_px_end"]*data["alpha_w_end"])
alpha_total_py_end = np.sum(data["alpha_py_end"]*data["alpha_w_end"])
alpha_total_pz_end = np.sum(data["alpha_pz_end"]*data["alpha_w_end"])
total_px_start = proton_total_px_start + boron_total_px_start
total_py_start = proton_total_py_start + boron_total_py_start
total_pz_start = proton_total_pz_start + boron_total_pz_start
total_px_end = proton_total_px_end + boron_total_px_end + alpha_total_px_end
total_py_end = proton_total_py_end + boron_total_py_end + alpha_total_py_end
total_pz_end = proton_total_pz_end + boron_total_pz_end + alpha_total_pz_end
## Absolute tolerance is needed because sometimes the initial momentum is exactly 0
assert(is_close(total_px_start, total_px_end, atol=1.e-15))
assert(is_close(total_py_start, total_py_end, atol=1.e-15))
assert(is_close(total_pz_start, total_pz_end, atol=1.e-15))
def check_id(data):
## Check that all created particles have unique id + cpu identifier (two particles with
## different cpu can have the same id)
complex_id = data["alpha_id_end"] + 1j*data["alpha_cpu_end"]
assert(complex_id.shape == np.unique(complex_id).shape)
def basic_product_particles_check(data):
## For each nuclear fusion reaction in the code, we create 6 alpha macroparticles. So the
## total number of alpha macroparticles must be a multiple of 6.
num_alpha = data["alpha_w_end"].shape[0]
assert(num_alpha%6 == 0)
## The weight of the 6 macroparticles coming from a single fusion event should be the same.
## We verify this here.
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][1::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][2::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][3::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][4::6]))
assert(np.array_equal(data["alpha_w_end"][::6], data["alpha_w_end"][5::6]))
## When we create 6 macroparticles, the first has the exact same momentum as the second, the
## third has the same as the fourth and the fifth has the same as the sixth. We verify this
## here
assert(np.array_equal(data["alpha_px_end"][::6], data["alpha_px_end"][1::6]))
assert(np.array_equal(data["alpha_py_end"][::6], data["alpha_py_end"][1::6]))
assert(np.array_equal(data["alpha_pz_end"][::6], data["alpha_pz_end"][1::6]))
assert(np.array_equal(data["alpha_px_end"][2::6], data["alpha_px_end"][3::6]))
assert(np.array_equal(data["alpha_py_end"][2::6], data["alpha_py_end"][3::6]))
assert(np.array_equal(data["alpha_pz_end"][2::6], data["alpha_pz_end"][3::6]))
assert(np.array_equal(data["alpha_px_end"][4::6], data["alpha_px_end"][5::6]))
assert(np.array_equal(data["alpha_py_end"][4::6], data["alpha_py_end"][5::6]))
assert(np.array_equal(data["alpha_pz_end"][4::6], data["alpha_pz_end"][5::6]))
def generic_check(data):
check_particle_number_conservation(data)
check_energy_conservation(data)
check_momentum_conservation(data)
check_id(data)
basic_product_particles_check(data)
def check_isotropy(data, relative_tolerance):
## Checks that the alpha particles are emitted isotropically
average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"])
average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"])
average_pz_sq = np.average(data["alpha_pz_end"]*data["alpha_pz_end"])
assert(is_close(average_px_sq, average_py_sq, rtol = relative_tolerance))
assert(is_close(average_px_sq, average_pz_sq, rtol = relative_tolerance))
def astrophysical_factor_lowE(E):
## E is in keV
## Returns astrophysical factor in MeV b using the low energy fit in the range E < 400 keV
## described in equation (2) of <NAME> and <NAME>, Nuclear Fusion, 40, 865 (2000)
C0 = 197.
C1 = 0.24
C2 = 2.31e-4
AL = 1.82e4
EL = 148.
dEL = 2.35
return C0 + C1*E + C2*E**2 + AL/((E-EL)**2 + dEL**2)
def astrophysical_factor_midE(E):
## E is in keV
## Returns astrophysical factor in MeV b using the mid energy fit in the range
## 400 keV < E < 642 keV described in equation (3) of <NAME> and <NAME>,
## Nuclear Fusion, 40, 865 (2000)
D0 = 330.
D1 = 66.1
D2 = -20.3
D5 = -1.58
E_400 = 400.
E_100 = 100.
E_norm = (E - E_400)/E_100
return D0 + D1*E_norm + D2*E_norm**2 + D5*E_norm**5
def astrophysical_factor_highE(E):
## E is in keV
## Returns astrophysical factor in MeV b using the high energy fit in the range
## 642 keV < E < 3500 keV described in equation (4) of <NAME> and <NAME>,
## Nuclear Fusion, 40, 865 (2000)
A0 = 2.57e6
A1 = 5.67e5
A2 = 1.34e5
A3 = 5.68e5
E0 = 581.3
E1 = 1083.
E2 = 2405.
E3 = 3344.
dE0 = 85.7
dE1 = 234.
dE2 = 138.
dE3 = 309.
B = 4.38
return A0/((E-E0)**2 + dE0**2) + A1/((E-E1)**2 + dE1**2) + \
A2/((E-E2)**2 + dE2**2) + A3/((E-E3)**2 + dE3**2) + B
def astrophysical_factor(E):
## E is in keV
## Returns astrophysical factor in MeV b using the fits described in <NAME>
## and <NAME>, Nuclear Fusion, 40, 865 (2000)
conditions = [E <= 400, E <= 642, E > 642]
choices = [astrophysical_factor_lowE(E),
astrophysical_factor_midE(E),
astrophysical_factor_highE(E)]
return np.select(conditions, choices)
def pb_cross_section_buck_fit(E):
## E is in MeV
## Returns cross section in b using a power law fit of the data presented in Buck et al.,
## Nuclear Physics A, 398(2), 189-202 (1983) in the range E > 3.5 MeV.
E_start_fit = 3.5
## Cross section at E = E_start_fit = 3.5 MeV
cross_section_start_fit = 0.2168440845211521
slope_fit = -2.661840717596765
return cross_section_start_fit*(E/E_start_fit)**slope_fit
def pb_cross_section(E):
## E is in keV
## Returns cross section in b using the fits described in <NAME> and <NAME>,
## Nuclear Fusion, 40, 865 (2000) for E < 3.5 MeV and a power law fit of the data presented in
## Buck et al., Nuclear Physics A, 398(2), 189-202 (1983) for E > 3.5 MeV.
E_MeV = E/1.e3
conditions = [E <= 3500, E > 3500]
choices = [astrophysical_factor(E)/E_MeV * np.exp(-np.sqrt(E_Gamow_MeV / E_MeV)),
pb_cross_section_buck_fit(E_MeV)]
return np.select(conditions, choices)
def E_com_to_p_sq_com(m1, m2, E):
## E is the total (kinetic+mass) energy of a two particle (with mass m1 and m2) system in
## its center of mass frame, in J.
## Returns the square norm of the momentum of each particle in that frame.
return E**2/(4.*scc.c**2) - (m1**2 + m2**2)*scc.c**2/2. + \
scc.c**6/(4.*E**2)*((m1**2 - m2**2)**2)
def compute_relative_v_com(E):
## E is the kinetic energy of proton+boron in the center of mass frame, in keV
## Returns the relative velocity between proton and boron in this frame, in m/s
E_J = E*keV_to_Joule + (m_p + m_b)*scc.c**2
p_sq = E_com_to_p_sq_com(m_p, m_b, E_J)
p = np.sqrt(p_sq)
gamma_p = np.sqrt(1. + p_sq / (m_p*scc.c)**2)
gamma_b = np.sqrt(1. + p_sq / (m_b*scc.c)**2)
v_p = p/(gamma_p*m_p)
v_b = p/(gamma_b*m_b)
return v_p+v_b
def expected_alpha_weight_com(E_com, proton_density, boron_density, dV, dt):
## Computes expected number of produced alpha particles as a function of energy E_com in the
## center of mass frame. E_com is in keV.
assert(np.all(E_com>=0))
## Case E_com == 0 is handled manually to avoid division by zero
conditions = [E_com == 0, E_com > 0]
## Necessary to avoid division by 0 warning when pb_cross_section is evaluated
E_com_never_zero = np.clip(E_com, 1.e-15, None)
choices = [0., pb_cross_section(E_com_never_zero)*compute_relative_v_com(E_com_never_zero)]
sigma_times_vrel = np.select(conditions, choices)
## Factor 3 is here because each fusion reaction produces 3 alphas
return 3.*proton_density*boron_density*sigma_times_vrel*barn_to_square_meter*dV*dt
def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell):
## Checks that the number of macroparticles is as expected for the first and second tests
## The first slice 0 < z < 1 does not contribute to alpha creation
numcells = dV_total - dV_slice
## In these tests, the fusion_multiplier is so high that the fusion probability per pair is
## equal to the parameter fusion_probability_target_value
fusion_probability_per_pair = fusion_probability_target_value
expected_fusion_number = numcells*num_pair_per_cell*fusion_probability_per_pair
## Each fusion event produces 6 alpha macroparticles
expected_macroparticle_number = 6.*expected_fusion_number
std_macroparticle_number = 6.*np.sqrt(expected_fusion_number)
actual_macroparticle_number = data["alpha_w_end"].shape[0]
# 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
assert(is_close(actual_macroparticle_number, expected_macroparticle_number, rtol = 0.,
atol = 5.*std_macroparticle_number))
## used in subsequent function
return expected_fusion_number
def p_sq_boron_frame_to_E_COM_frame(p_proton_sq):
# Takes the proton square norm of the momentum in the boron rest frame and returns the total
# kinetic energy in the center of mass frame. Everything is in SI units.
# Total (kinetic + mass) energy in lab frame
E_lab = np.sqrt(p_proton_sq*scc.c**2 + (m_p*scc.c**2)**2) + m_b*scc.c**2
# Use invariant E**2 - p**2c**2 of 4-momentum norm to compute energy in center of mass frame
E_com = np.sqrt(E_lab**2 - p_proton_sq*scc.c**2)
# Corresponding kinetic energy
E_com_kin = E_com - (m_b+scc.m_p)*scc.c**2
return E_com_kin
def p_sq_to_kinetic_energy(p_sq, m):
## Returns the kinetic energy of a particle as a function of its squared momentum.
## Everything is in SI units.
return np.sqrt(p_sq*scc.c**2 + (m*scc.c**2)**2) - (m*scc.c**2)
def compute_E_com1(data):
## Computes kinetic energy (in Joule) in the center of frame for the first test
## Square norm of the momentum of proton/boron as a function of cell number in z direction
p_sq = 2.*m_reduced*(Energy_step*np.arange(size_z)**2)
return p_sq_to_kinetic_energy(p_sq, m_b) + p_sq_to_kinetic_energy(p_sq, m_p)
def compute_E_com2(data):
## Computes kinetic energy (in Joule) in the center of frame for the second test
## Square norm of the momentum of the proton as a function of cell number in z direction
p_proton_sq = 2.*m_p*(Energy_step*np.arange(size_z)**2)
return p_sq_boron_frame_to_E_COM_frame(p_proton_sq)
def check_alpha_yield(data, expected_fusion_number, E_com, proton_density, boron_density):
## Checks that the fusion yield is as expected for the first and second tests.
## Proton and boron densities are in m^-3.
alpha_weight_theory = expected_alpha_weight_com(E_com/keV_to_Joule, proton_density,
boron_density, dV_slice, dt)
alpha_weight_simulation = np.histogram(data["alpha_z_end"], bins=size_z, range=(0, size_z),
weights = data["alpha_w_end"])[0]
## -1 is here because the first slice 0 < z < 1 does not contribute to alpha creation
expected_fusion_number_per_slice = expected_fusion_number/(size_z-1)
relative_std_alpha_weight = 1./np.sqrt(expected_fusion_number_per_slice)
# 5 sigma test that has an intrinsic probability to fail of 1 over ~2 millions
assert(np.all(is_close(alpha_weight_theory, alpha_weight_simulation,
rtol = 5.*relative_std_alpha_weight)))
def check_initial_energy1(data, E_com):
## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
## takes place in two steps:
## (1): proton + boron 11 -> alpha + beryllium 8
## (2): beryllium 8 -> alpha + alpha
## The alpha generated in the first step (labeled alpha1) generally has a different initial
## energy distribution than the alphas generated in the second step (labeled alpha2 and
## alpha3).
## In the first test, we are in the center of mass frame. Therefore, the momentum of alpha1 is
## entirely determined by the energy in the center of mass frame, so we check in this function
## that the energy of the alpha1 macroparticles is as expected. On the other hand, the energy
## of alpha2 and alpha3 follows a continuous distribution within a given range. In this test,
## we check that this range is as expected by comparing the maximum and minimum energy of the
## obtained macroparticles to the theoretical maximum and minimum.
    ## Note that in the simulations, 6 macroparticles are generated for each fusion event.
## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
## The fifth and sixth are alpha3.
energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
z_alpha = data["alpha_z_end"]
# Loop over all slices (i.e. cells in the z direction)
for slice_number in range(1, size_z):
        ## Kinetic energy in the center of mass frame before fusion
        E_kinetic_com_before = E_com[slice_number]
        ## Total (kinetic + mass) energy in the center of mass frame after
## proton + boron 11 -> alpha + beryllium 8
E_total_com_after = E_kinetic_com_before + E_fusion + (m_a + m_be)*scc.c**2
## Corresponding momentum norm squared of alpha1/beryllium
p_sq_after = E_com_to_p_sq_com(m_a, m_be, E_total_com_after)
## Corresponding kinetic energy for alpha1
energy_alpha1_theory = p_sq_to_kinetic_energy(p_sq_after, m_a)
## Corresponding kinetic energy for beryllium
energy_beryllium_theory = p_sq_to_kinetic_energy(p_sq_after, m_be)
## Corresponding kinetic energy for alpha2 + alpha3 after beryllium decay
energy_alpha2_plus_3_theory = energy_beryllium_theory + E_decay
## Compute the theoretical maximum and minimum energy of alpha2 and alpha3. This
## calculation is done nonrelativistically, by noting that the maximum (minimum) energy
## corresponds to an alpha emitted exactly in the (opposite) direction of the beryllium
## in the center of mass frame. This calculation involves solving a polynomial equation of
## order 2 in p_alpha23.
max_p_alpha23 = 0.5*(np.sqrt(p_sq_after) + \
np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
min_p_alpha23 = 0.5*(np.sqrt(p_sq_after) - \
np.sqrt(4*m_a*energy_alpha2_plus_3_theory - p_sq_after))
max_energy_alpha23 = max_p_alpha23**2/(2.*m_a)
min_energy_alpha23 = min_p_alpha23**2/(2.*m_a)
## Get the energy of all alphas in the slice
energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
(z_alpha < (slice_number + 1))]
## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
energy_alpha1_simulation = energy_alpha_slice[::6]
## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
energy_alpha2_simulation = energy_alpha_slice[2::6]
## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
energy_alpha3_simulation = energy_alpha_slice[4::6]
assert(np.all(is_close(energy_alpha1_simulation, energy_alpha1_theory, rtol=5.e-8)))
assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=1.e-2))
def check_initial_energy2(data):
## In WarpX, the initial momentum of the alphas is computed assuming that the fusion process
## takes place in two steps:
## (1): proton + boron 11 -> alpha + beryllium 8
## (2): beryllium 8 -> alpha + alpha
## The alpha generated in the first step (labeled alpha1) generally has a different initial
## energy distribution than the alphas generated in the second step (labeled alpha2 and
## alpha3).
## In the second test, we are in the boron rest frame. In this case, the momentum of each alpha
## follows a continuous distribution within a given range. In this function, we verify that
## this range is as expected by comparing the maximum and minimum energy of the obtained
## macroparticles to the theoretical maximum and minimum. Be aware that the range for alpha1
## is not the same as the range for alpha2 and alpha3 (typically alpha1 particles will carry
## more energy).
    ## Note that in the simulations, 6 macroparticles are generated for each fusion event.
## The first and second macroparticles are alpha1 particles. The third and fourth are alpha2.
## The fifth and sixth are alpha3.
energy_alpha_simulation = compute_energy_array(data, "alpha", "end", m_a)
z_alpha = data["alpha_z_end"]
# Loop over all slices (i.e. cells in the z direction)
for slice_number in range(1, size_z):
        ## For simplicity, all the calculations in this function are done nonrelativistically
## Proton kinetic energy in the lab frame before fusion
E_proton_nonrelativistic = Energy_step*slice_number**2
## Corresponding square norm of proton momentum
p_proton_sq = 2.*scc.m_p*E_proton_nonrelativistic
## Kinetic energy in the lab frame after
## proton + boron 11 -> alpha + beryllium 8
E_after_fusion = E_proton_nonrelativistic + E_fusion
## Compute the theoretical maximum and minimum energy of alpha1 in the lab frame. This
## calculation is done by noting that the maximum (minimum) energy corresponds to an alpha
## emitted exactly in the (opposite) direction of the proton in the lab frame. This
## calculation involves solving a polynomial equation of order 2 in p_alpha1.
max_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) + \
np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
(m_a/m_be + 1.)
min_p_alpha1 = (m_a/m_be*np.sqrt(p_proton_sq) - \
np.sqrt(-m_a/m_be*p_proton_sq + 2.*E_after_fusion*m_a*(m_a/m_be + 1.))) / \
(m_a/m_be + 1.)
max_energy_alpha1 = max_p_alpha1**2/(2*m_a)
min_energy_alpha1 = min_p_alpha1**2/(2*m_a)
## Corresponding max/min kinetic energy of Beryllium in the lab frame
max_E_beryllium = E_after_fusion - min_energy_alpha1
min_E_beryllium = E_after_fusion - max_energy_alpha1
## Corresponding max/min momentum square of Beryllium in the lab frame
max_p_sq_beryllium = 2.*m_be*max_E_beryllium
min_p_sq_beryllium = 2.*m_be*min_E_beryllium
## Corresponding max/min kinetic energy in the lab frame for alpha2 + alpha3 after
## Beryllium decay
max_energy_alpha2_plus_3 = max_E_beryllium + E_decay
min_energy_alpha2_plus_3 = min_E_beryllium + E_decay
## Compute the theoretical maximum and minimum energy of alpha2 and alpha3 in the lab
## frame. This calculation is done by noting that the maximum (minimum) energy corresponds
## to an alpha emitted exactly in the (opposite) direction of a beryllium with energy
## max_E_beryllium (min_E_beryllium). This calculation involves solving a polynomial
## equation of order 2 in p_alpha23.
max_p_alpha23 = 0.5*(np.sqrt(max_p_sq_beryllium) + \
np.sqrt(4*m_a*max_energy_alpha2_plus_3 - max_p_sq_beryllium))
min_p_alpha23 = 0.5*(np.sqrt(min_p_sq_beryllium) - \
np.sqrt(4*m_a*min_energy_alpha2_plus_3 - min_p_sq_beryllium))
max_energy_alpha23 = max_p_alpha23**2/(2*m_a)
min_energy_alpha23 = min_p_alpha23**2/(2*m_a)
## Get the energy of all alphas in the slice
energy_alpha_slice = energy_alpha_simulation[(z_alpha >= slice_number)* \
(z_alpha < (slice_number + 1))]
## Energy of alphas1 (here, first macroparticle of each fusion event) in the slice
energy_alpha1_simulation = energy_alpha_slice[::6]
## Energy of alphas2 (here, third macroparticle of each fusion event) in the slice
energy_alpha2_simulation = energy_alpha_slice[2::6]
## Energy of alphas3 (here, fifth macroparticle of each fusion event) in the slice
energy_alpha3_simulation = energy_alpha_slice[4::6]
assert(is_close(np.amax(energy_alpha1_simulation), max_energy_alpha1, rtol=1.e-2))
assert(is_close(np.amin(energy_alpha1_simulation), min_energy_alpha1, rtol=1.e-2))
## Tolerance is quite high below because we don't have a lot of alphas to produce good
## statistics and an event like alpha1 emitted exactly in direction of proton & alpha2
## emitted exactly in direction opposite to Beryllium is somewhat rare.
assert(is_close(np.amax(energy_alpha2_simulation), max_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amin(energy_alpha2_simulation), min_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amax(energy_alpha3_simulation), max_energy_alpha23, rtol=2.5e-1))
assert(is_close(np.amin(energy_alpha3_simulation), min_energy_alpha23, rtol=2.5e-1))
def check_xy_isotropy(data):
## Checks that the alpha particles are emitted isotropically in x and y
average_px_sq = np.average(data["alpha_px_end"]*data["alpha_px_end"])
    average_py_sq = np.average(data["alpha_py_end"]*data["alpha_py_end"])
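    ## Assumed completion (the original source is truncated here): the x and y
    ## momentum statistics should agree; the tolerance below is a placeholder.
    assert(is_close(average_px_sq, average_py_sq, rtol = 5.e-2))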
""""""
# -*- coding: utf-8 -*-
# date: 2021
# author: AllChooseC
import os
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import tqdm
from utils import EarlyStopping, class_penalty
# Class distributions (percentages per class); the first set is kept for
# reference but is immediately superseded by the 2017 values below.
# class_distribution = [59.68, 8.68, 28.55, 3.08]
# 2017 distribution actually used:
class_distribution = [59.22, 8.65, 28.80, 3.33]
def loss_batch(model, loss_func, xb, yb, opt=None, metric=None):
"""Calculates the loss and metric value for a batch of data,
and optionally performs gradient descent if an optimizer is provided."""
preds = model(xb)
loss = loss_func(preds, yb, weight=class_penalty(class_distribution, class_penalty=0.2))
if opt is not None:
loss.backward() # Compute gradients
opt.step() # Update parameters
opt.zero_grad() # Reset gradients
metric_result = None
if metric is not None:
metric_result = metric(preds, yb) # Compute the metric
return loss.item(), len(xb), metric_result
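# Illustrative usage (the names `model`, `xb`, `yb`, `optimizer` and `accuracy`
# are placeholders, not defined in this file):
# loss, n, acc = loss_batch(model, torch.nn.functional.cross_entropy, xb, yb,
#                           opt=optimizer, metric=accuracy)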
def evaluate(model, loss_func, valid_dl, metric=None):
""""""
with torch.no_grad():
# Pass each batch through the model
results = [loss_batch(model, loss_func, xb, yb, metric=metric) for xb, yb in tqdm(valid_dl)]
losses, nums, metrics = zip(*results) # Separate losses, counts and metrics
total = np.sum(nums) # Total size of the dataset
avg_loss = np.sum(np.multiply(losses, nums)) / total
avg_metric = None
if metric is not None:
avg_metric = np.sum(np.multiply(metrics, nums)) / total
return avg_loss, total, avg_metric
def learn_on_batch(model, loss_func, train_dl, opt=None, metric=None):
""""""
# Pass each batch through the model
results = [loss_batch(model, loss_func, xb, yb, opt, metric=metric) for xb, yb in tqdm(train_dl)]
losses, nums, metrics = zip(*results) # Separate losses, counts and metrics
total = np.sum(nums) # Total size of the dataset
    avg_loss = np.sum(np.multiply(losses, nums)) / total
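    # Assumed completion (the original source is truncated here), mirroring
    # the tail of evaluate() above.
    avg_metric = None
    if metric is not None:
        avg_metric = np.sum(np.multiply(metrics, nums)) / total
    return avg_loss, total, avg_metric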
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Analytical Quantum Gradient Descent (AQGD) optimizer."""
import logging
from typing import Callable, Tuple, List, Dict, Union
import numpy as np
from qiskit.utils.validation import validate_range_exclusive_max
from .optimizer import Optimizer, OptimizerSupportLevel
from ..exceptions import AlgorithmError
logger = logging.getLogger(__name__)
class AQGD(Optimizer):
"""Analytic Quantum Gradient Descent (AQGD) with Epochs optimizer.
Performs gradient descent optimization with a momentum term, analytic gradients,
and customized step length schedule for parametrized quantum gates, i.e.
Pauli Rotations. See, for example:
* <NAME>, <NAME>, <NAME>, and <NAME>. (2018).
Quantum circuit learning. Phys. Rev. A 98, 032309.
https://arxiv.org/abs/1803.00745
* <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. (2019).
Evaluating analytic gradients on quantum hardware. Phys. Rev. A 99, 032331.
https://arxiv.org/abs/1811.11184
for further details on analytic gradients of parametrized quantum gates.
Gradients are computed "analytically" using the quantum circuit when evaluating
the objective function.
"""
_OPTIONS = ['maxiter', 'eta', 'tol', 'disp', 'momentum', 'param_tol', 'averaging']
def __init__(self,
maxiter: Union[int, List[int]] = 1000,
eta: Union[float, List[float]] = 1.0,
                 tol: float = 1e-6,
momentum: Union[float, List[float]] = 0.25,
param_tol: float = 1e-6,
averaging: int = 10) -> None:
"""
Performs Analytical Quantum Gradient Descent (AQGD) with Epochs.
Args:
maxiter: Maximum number of iterations (full gradient steps)
eta: The coefficient of the gradient update. Increasing this value
results in larger step sizes: param = previous_param - eta * deriv
tol: Tolerance for change in windowed average of objective values.
Convergence occurs when either objective tolerance is met OR parameter
tolerance is met.
momentum: Bias towards the previous gradient momentum in current
update. Must be within the bounds: [0,1)
param_tol: Tolerance for change in norm of parameters.
averaging: Length of window over which to average objective values for objective
convergence criterion
Raises:
            AlgorithmError: If the lengths of ``maxiter``, ``momentum``, and ``eta`` are not the same.
"""
super().__init__()
if isinstance(maxiter, int):
maxiter = [maxiter]
if isinstance(eta, (int, float)):
eta = [eta]
if isinstance(momentum, (int, float)):
momentum = [momentum]
if len(maxiter) != len(eta) or len(maxiter) != len(momentum):
raise AlgorithmError("AQGD input parameter length mismatch. Parameters `maxiter`, "
"`eta`, and `momentum` must have the same length.")
for m in momentum:
validate_range_exclusive_max('momentum', m, 0, 1)
self._eta = eta
self._maxiter = maxiter
self._momenta_coeff = momentum
self._param_tol = param_tol
self._tol = tol
self._averaging = averaging
# state
self._avg_objval = None
self._prev_param = None
self._eval_count = 0 # function evaluations
self._prev_loss = [] # type: List[float]
self._prev_grad = [] # type: List[List[float]]
def get_support_level(self) -> Dict[str, OptimizerSupportLevel]:
""" Support level dictionary
Returns:
Dict[str, int]: gradient, bounds and initial point
support information that is ignored/required.
"""
return {
'gradient': OptimizerSupportLevel.ignored,
'bounds': OptimizerSupportLevel.ignored,
'initial_point': OptimizerSupportLevel.required
}
def _compute_objective_fn_and_gradient(self, params: List[float],
obj: Callable) -> Tuple[float, np.array]:
"""
Obtains the objective function value for params and the analytical quantum derivatives of
the objective function with respect to each parameter. Requires
2*(number parameters) + 1 objective evaluations
Args:
params: Current value of the parameters to evaluate the objective function
obj: Objective function of interest
Returns:
Tuple containing the objective value and array of gradients for the given parameter set.
"""
num_params = len(params)
param_sets_to_eval = params + np.concatenate(
(np.zeros((1, num_params)), # copy of the parameters as is
np.eye(num_params) * np.pi / 2, # copy of the parameters with the positive shift
-np.eye(num_params) * np.pi / 2), # copy of the parameters with the negative shift
axis=0)
# Evaluate,
# reshaping to flatten, as expected by objective function
values = np.array(obj(param_sets_to_eval.reshape(-1)))
# Update number of objective function evaluations
self._eval_count += 2 * num_params + 1
# return the objective function value
obj_value = values[0]
# return the gradient values
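        # Analytic gradient via the parameter-shift rule for Pauli rotations:
        # dE/dtheta_i = (E(theta_i + pi/2) - E(theta_i - pi/2)) / 2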
gradient = 0.5 * (values[1:num_params + 1] - values[1 + num_params:])
return obj_value, gradient
def _update(self, params: np.array, gradient: np.array, mprev: np.array,
step_size: float, momentum_coeff: float) -> Tuple[List[float], List[float]]:
"""
Updates full parameter array based on a step that is a convex
combination of the gradient and previous momentum
Args:
params: Current value of the parameters to evaluate the objective function at
gradient: Gradient of objective wrt parameters
mprev: Momentum vector for each parameter
step_size: The scaling of step to take
momentum_coeff: Bias towards previous momentum vector when updating current
momentum/step vector
Returns:
Tuple of the updated parameter and momentum vectors respectively.
"""
# Momentum update:
# Convex combination of previous momentum and current gradient estimate
mnew = (1 - momentum_coeff) * gradient + momentum_coeff * mprev
params -= step_size * mnew
return params, mnew
def _converged_objective(self, objval: float, tol: float, window_size: int) -> bool:
"""
Tests convergence based on the change in a moving windowed average of past objective values
Args:
objval: Current value of the objective function
tol: tolerance below which (average) objective function change must be
window_size: size of averaging window
Returns:
Bool indicating whether or not the optimization has converged.
"""
# If we haven't reached the required window length,
# append the current value, but we haven't converged
if len(self._prev_loss) < window_size:
self._prev_loss.append(objval)
return False
# Update last value in list with current value
self._prev_loss.append(objval)
# (length now = n+1)
# Calculate previous windowed average
# and current windowed average of objective values
prev_avg = np.mean(self._prev_loss[:window_size])
curr_avg = np.mean(self._prev_loss[1:window_size + 1])
self._avg_objval = curr_avg
# Update window of objective values
# (Remove earliest value)
self._prev_loss.pop(0)
if np.absolute(prev_avg - curr_avg) < tol:
# converged
logger.info("Previous obj avg: %f\nCurr obj avg: %f", prev_avg, curr_avg)
return True
return False
def _converged_parameter(self, parameter: List[float], tol: float) -> bool:
"""
Tests convergence based on change in parameter
Args:
parameter: current parameter values
tol: tolerance for change in norm of parameters
Returns:
Bool indicating whether or not the optimization has converged
"""
if self._prev_param is None:
self._prev_param = np.copy(parameter)
return False
order = np.inf
        p_change = np.linalg.norm(self._prev_param - parameter, ord=order)
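        # Assumed completion (the original source is truncated here): store
        # the current parameters and declare convergence once the change in
        # their norm falls below the tolerance.
        self._prev_param = np.copy(parameter)
        return p_change < tol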
import os
import sys
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
import numpy as np
import torch
from pytorch_resnet import ResNet43
import torch.nn.functional as F
import kornia as K
import torchvision
from matplotlib import pyplot as plt
class Attention:
def __init__(self,in_shape,n_rotations,preprocess,device,lite=False):
# TODO BY HAOJIE: add lite model
self.device = device
self.preprocess = preprocess
self.n_rotations = n_rotations
        # Pad every input up to a fixed 480x480 canvas (this overrides the
        # image-derived maximum dimension).
        # max_dim = np.max(in_shape[:2])
        max_dim = 480
self.padding = np.zeros((3, 2), dtype=int)
pad = (max_dim - np.array(in_shape[:2])) / 2
self.padding[:2] = pad.reshape(2, 1)
in_shape = np.array(in_shape)
in_shape += np.sum(self.padding, axis=1)
in_shape = tuple(in_shape)
self.in_type = in_shape[-1]
# self.in_type = 6, self.outdim=1
# get the location
self.model = ResNet43(self.in_type,outdim=1,include_batch_normal=False).to(self.device)
# use the location as pivot to rotate the image and get the angle
#self.angle_model = ResNet43(self.in_type,outdim=1,include_batch_normal=False).to(self.device)
self.optim = torch.optim.Adam(self.model.parameters(),lr=1e-5)
#self.pad_2 = (80,80,80,80)
def forward(self,in_img,softmax=True,train=True):
#print('padding',self.padding)
#print('img',in_img.shape)
in_data = np.pad(in_img, self.padding, mode='constant')
#print('indata',in_data.shape)
in_data = self.preprocess(in_data)
in_shape = (1,) + in_data.shape
in_data = in_data.reshape(in_shape).transpose(0, 3, 1, 2)
in_data = torch.from_numpy(in_data).to(self.device)
#print(in_data.size())
# rotate image
pivot = torch.as_tensor([in_data.shape[-2]/2,in_data.shape[-1]/2])
pivot =pivot.to(self.device).repeat(self.n_rotations//2,1).to(torch.float32)
in_data = in_data.repeat(self.n_rotations//2,1,1,1)
in_data = K.geometry.rotate(in_data,torch.from_numpy(-np.linspace(0., 360., self.n_rotations, endpoint=False, dtype=np.float32))[0:18].to(self.device), mode='nearest',center=pivot)
#print('indata rotate 36/2',in_data.shape)
#self.imshow(in_data,size=(36,12),name='rotation')
if not train:
self.model.eval()
with torch.no_grad():
logits = self.model(in_data)
else:
logits = self.model(in_data)
#print('logits',logits.shape)
# rotate back
logits = K.geometry.rotate(logits,torch.from_numpy(np.linspace(0., 360., self.n_rotations,
endpoint=False,dtype=np.float32))[0:18].to(self.device),
mode='nearest',center=pivot)
#print('atenion logits1',logits.shape)
#self.imshow(logits)
#self.imshow(logits,size=(36,12),name='rotation_back')
#logits = logits[:,:,80:-80,80:-80]
#print('first crop',logits.size())
c0 = self.padding[:2, 0]
c1 = c0 + in_img.shape[:2]
#print('crop',c0)
#print('crop',c1)
logits = logits[:, :, c0[0]:c1[0], c0[1]:c1[1]]
#print('second crop',logits.size())
#print('attention logits',logits.shape)
#self.imshow(logits)
output = logits.reshape(1,-1)
if softmax:
output = F.softmax(output,dim=-1)
output = output.reshape(logits.shape[0],logits.shape[-2],logits.shape[-1]).cpu().detach().numpy()
#print('output',output.shape)
output = output.transpose(1,2,0)
return output
def train(self,in_img,p,theta,backprop=True):
self.model.train()
self.optim.zero_grad()
output = self.forward(in_img,softmax=False)
# Get label
theta = (theta + 2*np.pi)%(2*np.pi)
if theta >= np.pi:
theta = theta -np.pi
# angle label
# degree interval: 10
theta_i = theta / (2 * np.pi / 36)
# theta_i is in range [0,17]
theta_i = np.int32(np.round(theta_i)) % 18  # modulo assumed so the label stays in [0, 17]; the source line is truncated here
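# Illustration of the angle-label scheme above, written as a standalone helper
# (names are illustrative only): fold theta into [0, pi), since theta and
# theta + pi are treated as equivalent, then discretize into 18 bins of 10 deg.
def angle_to_bin(theta, n_rotations=36):
    theta = (theta + 2 * np.pi) % (2 * np.pi)
    if theta >= np.pi:
        theta -= np.pi
    return int(np.round(theta / (2 * np.pi / n_rotations))) % (n_rotations // 2)

# e.g. angle_to_bin(np.deg2rad(95)) -> 10, angle_to_bin(np.deg2rad(180)) -> 0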
import numpy as np
import pandas as pd
from numpy.linalg.linalg import LinAlgError
from numpy.linalg import norm
import matplotlib.pyplot as plt
import sys
from ctypes import CDLL, POINTER
from ctypes import c_int, c_double
# Load the library I created for extra speed
mylib = CDLL("./mylib.so")
# C-type corresponding to numpy 2-dimensional array (matrix)
ND_POINTER_1 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1, flags="C")
ND_POINTER_2 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags="C")
ND_POINTER_3 = np.ctypeslib.ndpointer(dtype=np.float64, ndim=3, flags="C")
# define the prototypes of the functions
mylib.lennard_jones_function.argtypes = [ND_POINTER_2, c_int, c_double, c_double]
mylib.lennard_jones_function.restype = c_double
mylib.evaluate.argtypes = [ND_POINTER_3, ND_POINTER_2, c_int, c_int]
mylib.evaluate.restype = None
# For Genetic Algorithms
# Evaluation
def evaluate_population(population, number_of_atoms):
values = np.zeros(shape=(population.shape[0], 1), dtype=np.float64)
mylib.evaluate(population, values, population.shape[0], number_of_atoms)
x_best_index = np.argmin(values)
return values, x_best_index, values.min()
# Selection
def roulette_wheel_selection(population, evaluations, selective_pressure):
descenting_order = np.argsort(evaluations, axis=0)[::-1]
population = population[descenting_order]
N = evaluations.shape[0]
fitness_scores = np.zeros(shape=(N, 1))
random_vector = np.random.uniform(low=0, high=1, size=(N, 1))
selected_indexs = np.zeros(shape=(N, 1), dtype=int)
for i, _ in enumerate(fitness_scores):
fitness_scores[i] = 2 - selective_pressure + 2 * (selective_pressure - 1) * i / (N - 1)  # linear ranking; i is the 0-based rank
selection_probabilities = fitness_scores / np.sum(fitness_scores)
for rn_index, random_number in enumerate(random_vector):
probability_sum = 0
for sp_index, selection_probability in enumerate(selection_probabilities):
probability_sum += selection_probability
if random_number <= probability_sum:
selected_indexs[rn_index] = sp_index
break
return np.squeeze(population[selected_indexs])
def tournament_selection(population, evaluations, tournament_size, dtype):
N = population.shape[0]
tournament_winners = np.zeros(shape=population.shape, dtype=dtype)
for i in range(0, N):
random_choices = np.random.choice(N, size=tournament_size, replace=False)
tournament_winner_index = evaluations[random_choices].argmin()
tournament_winners[i] = population[random_choices][tournament_winner_index]
return tournament_winners
def new_population_top_N(population, mutated_population, population_evaluations, mutated_population_evaluations):
N = population.shape[0]
all_population = np.stack((population, mutated_population), axis=0)
all_population = all_population.reshape((2 * population.shape[0], population.shape[1]))
all_evaluations = np.stack((population_evaluations, mutated_population_evaluations))
all_evaluations = all_evaluations.reshape((2 * population_evaluations.shape[0], 1))
ascending_order = np.argsort(all_evaluations, axis=0)
all_evaluations = all_evaluations[ascending_order]
all_evaluations = all_evaluations.reshape((all_evaluations.shape[0], 1))
all_population = all_population[ascending_order]
all_population = np.squeeze(all_population)
return all_population[0:N], all_evaluations[0:N]
# Genetic Algorithm Binary
def calculate_number_of_bits(Umin, Umax, error):
length_of_space = Umax - Umin
possible_numbers = 1 + length_of_space / error
for n in range(1, 64):
if np.power(2, n-1) < possible_numbers <= np.power(2, n):
return n
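# Worked example for calculate_number_of_bits: encoding [-2, 2] at resolution
# 1e-3 needs 1 + 4/1e-3 = 4001 representable values, and the smallest n with
# 2**n >= 4001 is n = 12 (2**11 = 2048 < 4001 <= 4096 = 2**12).
# calculate_number_of_bits(-2.0, 2.0, 1e-3)  # -> 12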
def calculate_base_10(binary_number):
number_base_10 = 0
for i, bi in enumerate(binary_number):
number_base_10 += bi * np.power(2, i)
return number_base_10
def calculate_number_base_10_in_feasible_space(Umin, Umax, n_bits, number_base_10):
length_of_space = Umax - Umin
return Umin + number_base_10 * length_of_space / (np.power(2, n_bits) - 1)
def decoder(population, Umin, Umax, number_of_atoms, dimensionality, n_bits):
population_base_10 = np.zeros(shape=(population.shape[0], number_of_atoms, dimensionality))
for i, pi in enumerate(population):
pi = np.array_split(pi, number_of_atoms)
for j, pij in enumerate(pi):
pij = np.array_split(pij, dimensionality)
pij_base_10 = list()
for binary_number in pij:
number_base_10 = calculate_base_10(binary_number)
number_base_10_fs = calculate_number_base_10_in_feasible_space(Umin, Umax, n_bits, number_base_10)
pij_base_10.append(number_base_10_fs)
population_base_10[i][j] = np.asarray(pij_base_10)
return population_base_10
def initialize_binary_population(population_size, number_of_atoms, dimensionality, n_bits):
population = np.random.randint(low=0, high=2, size=(population_size, number_of_atoms, dimensionality * n_bits))
population = population.reshape(population_size, number_of_atoms * dimensionality * n_bits)
return population
def crossover_binary_population(selected_population, crossover_rate, crossover_points):
# crossover_rate = [0, 1]
# crossover_points = m - 1, where m is the length of the dna
N = selected_population.shape[0]
to_crossover = np.random.uniform(low=0, high=1, size=(N, 1)) < crossover_rate
to_crossover_indexes = np.where(np.any(to_crossover==True, axis=1))[0]
crossover_population = np.array(selected_population)
if to_crossover_indexes.shape[0] % 2 != 0:
random_choice = np.random.randint(low=0, high=N)
to_crossover_indexes = np.append(to_crossover_indexes, random_choice)
parents = selected_population[to_crossover_indexes]
children = np.zeros(shape=(parents.shape[0], parents.shape[1]), dtype=int)
if parents.shape[0] == 0: return selected_population
points_of_crossover = np.arange(1, selected_population.shape[1])
np.random.shuffle(points_of_crossover)
points_of_crossover = points_of_crossover[:crossover_points]
points_of_crossover = np.sort(points_of_crossover, axis=0)
for i in range(0, parents.shape[0], 2):
parent_0 = np.array_split(parents[i], points_of_crossover)
parent_1 = np.array_split(parents[i + 1], points_of_crossover)
child_0, child_1 = list(), list()
for j in range(0, crossover_points + 1):
if j % 2 == 0:
child_0.append(parent_0[j])
child_1.append(parent_1[j])
else:
child_0.append(parent_1[j])
child_1.append(parent_0[j])
child_0 = np.asarray(child_0, dtype=object)
child_1 = np.asarray(child_1, dtype=object)
children[i] = np.concatenate(child_0, axis=None)
children[i + 1] = np.concatenate(child_1, axis=None)
# Replace parents with their children
for child_index, parent_index in enumerate(to_crossover_indexes):
crossover_population[parent_index] = children[child_index]
return crossover_population
def mutation_binary_population(crossover_population, mutation_rate):
# mutation_rate = [0, 1]
mutated_population = np.array(crossover_population)
for i, pi in enumerate(mutated_population):
to_mutate = np.random.uniform(low=0, high=1, size=(pi.shape[0], 1)) < mutation_rate
to_mutate_indexes = np.where(np.any(to_mutate==True, axis=1))[0]
for j in to_mutate_indexes:
pi[j] = 1 - pi[j]
mutated_population[i] = pi
return mutated_population
def Genetic_Algorithm_Binary(Umin, Umax, number_of_atoms, selection_method):
# Algorithm parameters
population_size = 1000
selective_pressure = 1.3 # selective_pressure = [1, 2]
tournament_size = 100 # tournament_size = [1, population_size]
crossover_rate = 0.5 # crossover_rate = [0, 1]
crossover_points = 6 # crossover_points = [0, m-1]
mutation_rate = 0.1 # mutation_rate = [0, 1]
# Do not change
error = 1e-3
dimensionality = 3 # 3D space
n_bits = calculate_number_of_bits(Umin, Umax, error)
iteration = population_size
best_iteration = iteration
max_iterations = number_of_atoms * 1e+5
population = initialize_binary_population(population_size, number_of_atoms, dimensionality, n_bits)
decoded_population = decoder(population, Umin, Umax, number_of_atoms, dimensionality, n_bits)
population_evaluations, x_best_index, min_value = evaluate_population(decoded_population, number_of_atoms)
x_best = population[x_best_index]
x_best_fvalue = min_value
while(True):
if (max_iterations <= iteration): break
iteration += population_size
if(selection_method == "rw"):
selected_population = roulette_wheel_selection(population, population_evaluations, selective_pressure)
else:
selected_population = tournament_selection(population, population_evaluations, tournament_size, int)
crossover_population = crossover_binary_population(selected_population, crossover_rate, crossover_points)
mutated_population = mutation_binary_population(crossover_population, mutation_rate)
decoded_population = decoder(mutated_population, Umin, Umax, number_of_atoms, dimensionality, n_bits)
mutated_population_evaluations, _, _ = evaluate_population(decoded_population, number_of_atoms)
population, population_evaluations = new_population_top_N(population, mutated_population, population_evaluations, mutated_population_evaluations)
if population_evaluations[0] < x_best_fvalue:
x_best, x_best_fvalue = population[0], population_evaluations[0]
best_iteration = iteration
print("Iterations: %d/%d Lennard-Jones potential: %.10f!!!" % (iteration, max_iterations, x_best_fvalue))
else:
print("Iterations: %d/%d Lennard-Jones potential: %.10f" % (iteration, max_iterations, x_best_fvalue))
x_best = x_best.reshape((1, x_best.size))
population = np.append(population, x_best, axis=0)
decoded_population = decoder(population, Umin, Umax, number_of_atoms, dimensionality, n_bits)
population_evaluations, x_best_index, x_best_fvalue = evaluate_population(decoded_population, number_of_atoms)
x_best = decoded_population[x_best_index]
return x_best, x_best_fvalue, decoded_population, population_evaluations, best_iteration
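# Hypothetical usage sketch (not part of the original file): run the binary GA
# on a 4-atom Lennard-Jones cluster with tournament selection. This assumes the
# compiled mylib.so used by evaluate_population() is present.
# x_best, f_best, pop, evals, it = Genetic_Algorithm_Binary(
#     Umin=-2.0, Umax=2.0, number_of_atoms=4, selection_method="ts")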
# Genetic Algorithm Real
def initialize_real_population(Umin, Umax, population_size, number_of_atoms, dimensionality):
population = np.random.uniform(low=Umin, high=Umax, size=(population_size, number_of_atoms, dimensionality))
return population
def crossover_real_population(selected_population, crossover_rate, delta=0.25):
# crossover_rate = [0, 1]
# delta > 0
N = selected_population.shape[0]
to_crossover = np.random.uniform(low=0, high=1, size=(N, 1)) < crossover_rate
to_crossover_indexes = np.where(np.any(to_crossover==True, axis=1))[0]
crossover_population = np.array(selected_population)
if to_crossover_indexes.shape[0] % 2 != 0:
random_choice = np.random.randint(low=0, high=N)
to_crossover_indexes = np.append(to_crossover_indexes, random_choice)
parents = selected_population[to_crossover_indexes]
children = np.zeros(shape=parents.shape, dtype=float)
if parents.shape[0] == 0: return selected_population
for i in range(0, parents.shape[0], 2):
# Create a pair of children for a pair of parents
for j in range(0, 2):
random_vector = np.random.uniform(low=-delta, high=1+delta, size=selected_population.shape[1])
child = np.multiply(random_vector, parents[i]) + np.multiply((1 - random_vector), parents[i + 1])
children[i + j] = child
# Replace parents with their children
for child_index, parent_index in enumerate(to_crossover_indexes):
crossover_population[parent_index] = children[child_index]
return crossover_population
def mutation_real_population(crossover_population, mutation_rate, Umin, Umax):
# mutation_rate = [0, 1]
mutated_population = np.array(crossover_population)
for i, pi in enumerate(mutated_population):
to_mutate = np.random.uniform(low=0, high=1, size=(pi.shape[0], 1)) < mutation_rate
to_mutate_indexes = np.where(np.any(to_mutate==True, axis=1))[0]
for j in to_mutate_indexes:
distance_from_Umin = abs(abs(pi[j]) - abs(Umin))
distance_from_Umax = abs(abs(pi[j]) - abs(Umax))
min_distance = min(distance_from_Umin, distance_from_Umax)
sigma = min_distance / 3
zj = np.random.normal(0, sigma)
pi[j] = pi[j] + zj
mutated_population[i] = pi
return mutated_population
def Genetic_Algorithm_Real(Umin, Umax, number_of_atoms, selection_method):
# Algorithm parameters
population_size = 1000
selective_pressure = 1.3
tournament_size = 100
crossover_rate = 0.5
mutation_rate = 0.1
# Do not change
dimensionality = 3
iteration = population_size
best_iteration = iteration
max_iterations = number_of_atoms * 1e+5
population = initialize_real_population(Umin, Umax, population_size, number_of_atoms, dimensionality)
population_evaluations, x_best_index, min_value = evaluate_population(population, number_of_atoms)
# vectorize population for the algorithm
population = population.reshape(population_size, number_of_atoms * dimensionality)
x_best = population[x_best_index]
x_best_fvalue = min_value
while(True):
if (max_iterations <= iteration): break
iteration += population_size
if selection_method == "rw":
selected_population = roulette_wheel_selection(population, population_evaluations, selective_pressure)
else:
selected_population = tournament_selection(population, population_evaluations, tournament_size, float)
crossover_population = crossover_real_population(selected_population, crossover_rate)
mutated_population = mutation_real_population(crossover_population, mutation_rate, Umin, Umax)
# create mutated_population as array for evaluation
array_mutated_population = mutated_population.reshape(mutated_population.shape[0], number_of_atoms, dimensionality)
mutated_population_evaluations, _, _ = evaluate_population(array_mutated_population, number_of_atoms)
population, population_evaluations = new_population_top_N(population, mutated_population, population_evaluations, mutated_population_evaluations)
if population_evaluations[0] < x_best_fvalue:
x_best, x_best_fvalue = population[0], population_evaluations[0]
best_iteration = iteration
print("Iterations: %d/%d Lennard-Jones potential: %.10f!!!" % (iteration, max_iterations, x_best_fvalue))
else:
print("Iterations: %d/%d Lennard-Jones potential: %.10f" % (iteration, max_iterations, x_best_fvalue))
x_best = x_best.reshape(number_of_atoms, dimensionality)
population = population.reshape(population.shape[0], number_of_atoms, dimensionality)
return x_best, x_best_fvalue.item(), population, population_evaluations, best_iteration
# Particle Swarm Optimization
def initialize_velocity(swarm_size, number_of_atoms, dimensionality, max_velocity):
velocity = np.random.uniform(low=-max_velocity, high=max_velocity, size=(swarm_size, number_of_atoms, dimensionality))
return velocity
def create_neighborhoods(swarm_size, neighborhood_radius):
# neighborhood_radius = [0, N/2]
neighborhoods = list()
for i in range(0, swarm_size):
neighborhood_i = list()
for j in range(i - neighborhood_radius, i + neighborhood_radius + 1):
neighborhood_i.append(j % swarm_size)
neighborhoods.append(np.asarray(neighborhood_i))
return np.asarray(neighborhoods)
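# Quick illustration of the ring (lbest) topology built above: with
# swarm_size = 6 and neighborhood_radius = 1, each particle sees itself and its
# two ring neighbours, wrapping around at the ends:
# create_neighborhoods(6, 1)
# -> [[5 0 1], [0 1 2], [1 2 3], [2 3 4], [3 4 5], [4 5 0]]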
def update_velocity(swarm, velocity, best_positions, best_positions_evaluations, neighborhoods, c1=2.05, c2=2.05, x=0.729):
rgn_0 = np.random.uniform(low=0, high=1)
rgn_1 = np.random.uniform(low=0, high=1)
best_neighbors = np.zeros(shape=swarm.shape)
for particle_i, neighbors_i in enumerate(neighborhoods):
best_neighbor_index = 0
best_neighbor_f_value = best_positions_evaluations[neighbors_i[0]]
for j, neighbor_ij in enumerate(neighbors_i):
neighborij_f_value = best_positions_evaluations[neighbor_ij]
if neighborij_f_value < best_neighbor_f_value:
best_neighbor_index = j
best_neighbor_f_value = neighborij_f_value
best_neighbors[particle_i] = swarm[neighbors_i[best_neighbor_index]]
velocity = x * (velocity + rgn_0 * c1 * (best_positions - swarm) + rgn_1 * c2 * (best_neighbors - swarm))
return velocity
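# Note on the defaults above: x = 0.729 is Clerc's constriction coefficient.
# For phi = c1 + c2 = 4.1 > 4, chi = 2 / |2 - phi - sqrt(phi**2 - 4*phi)|
# evaluates to ~0.7298, which keeps the velocity update from diverging.
def constriction_coefficient(c1=2.05, c2=2.05):
    phi = c1 + c2
    return 2.0 / abs(2.0 - phi - np.sqrt(phi ** 2 - 4.0 * phi))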
def update_particles(swarm, velocity):
return swarm + velocity
def check_velocity_bounds(velocity, max_velocity):
for i, vi in enumerate(velocity):
for j, vij in enumerate(vi):
if vij < -max_velocity:
vij = -max_velocity
elif max_velocity < vij:
vij = max_velocity
vi[j] = vij
velocity[i] = vi
return velocity
def check_particles_bounds(swarm, Umin, Umax):
for i, pi in enumerate(swarm):
for j, pij in enumerate(pi):
if pij < Umin:
pij = Umin
elif Umax < pij:
pij = Umax
pi[j] = pij
swarm[i] = pi
return swarm
def update_best_positions(best_positions, best_positions_evaluations, swarm, swarm_evaluations):
for i, _ in enumerate(swarm_evaluations):
if swarm_evaluations[i] < best_positions_evaluations[i]:
best_positions[i] = swarm[i]
best_positions_evaluations[i] = swarm_evaluations[i]
return best_positions, best_positions_evaluations
def Particle_Swarm_Optimization(Umin, Umax, number_of_atoms, model):
# Algorithm parameters
swarm_size = 1000
alpha = 0.5 # alpha = [0, 1]
# neighborhood_radius = [0, swarm_size / 2]
if model == "lbest":
neighborhood_radius = 5
else:
neighborhood_radius = int(swarm_size / 2)
# Do not change
max_velocity = alpha * (Umax - Umin)
iteration = swarm_size
best_iteration = iteration
max_iterations = number_of_atoms * 1e+5
dimensionality = 3
# Initializations
swarm = initialize_real_population(Umin, Umax, swarm_size, number_of_atoms, dimensionality)
velocity = initialize_velocity(swarm_size, number_of_atoms, dimensionality, max_velocity)
best_positions = np.array(swarm)
import numpy as np
from collections import defaultdict
import torch
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
import cosypose.utils.tensor_collection as tc
from cosypose.lib3d.transform_ops import invert_T, compute_transform_from_pose9d
from cosypose.lib3d.camera_geometry import project_points
from cosypose.lib3d.symmetric_distances import symmetric_distance_reprojected
from .ransac import make_obj_infos
from cosypose.utils.logging import get_logger
from cosypose.utils.timer import Timer
logger = get_logger(__name__)
def make_view_groups(pairs_TC1C2):
views = pairs_TC1C2.infos.loc[:, ['view1', 'view2']].values.T
views = np.unique(views.reshape(-1))
view_df = pd.DataFrame(dict(view_id=views, view_local_id=np.arange(len(views))))
view_to_id = view_df.set_index('view_id')
view1 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view1'], 'view_local_id'].values
view2 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view2'], 'view_local_id'].values
data = np.ones(len(view1))
n_views = len(views)
graph = csr_matrix((data, (view1, view2)), shape=(n_views, n_views))
n_components, ids = connected_components(graph, directed=True, connection='strong')
view_df['view_group'] = ids
view_df = view_df.drop(columns=['view_local_id'])
return view_df
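# Self-contained illustration of the grouping logic in make_view_groups, with
# made-up pair data: views are nodes, covisibility pairs are directed edges and
# strongly connected components define the view groups.
def _view_groups_demo():
    v1 = np.array([0, 1, 3])
    v2 = np.array([1, 0, 4])  # 0 <-> 1 form a cycle; 3 -> 4 is one-way
    graph = csr_matrix((np.ones(len(v1)), (v1, v2)), shape=(5, 5))
    n_components, ids = connected_components(graph, directed=True,
                                             connection='strong')
    return n_components, ids  # -> 4 groups; only views 0 and 1 share one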
class SamplerError(Exception):
pass
class MultiviewRefinement:
def __init__(self, candidates, cameras, pairs_TC1C2, mesh_db):
self.device, self.dtype = candidates.device, candidates.poses.dtype
self.mesh_db = mesh_db
cameras = cameras.to(self.device).to(self.dtype)
pairs_TC1C2 = pairs_TC1C2.to(self.device).to(self.dtype)
view_ids = np.unique(candidates.infos['view_id'])
keep_ids = np.logical_and(
np.isin(pairs_TC1C2.infos['view1'], view_ids),
np.isin(pairs_TC1C2.infos['view2'], view_ids),
)
pairs_TC1C2 = pairs_TC1C2[np.where(keep_ids)[0]]
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import copy
from flearn.models.client import Client
from flearn.utils.model_utils import Metrics
from flearn.utils.tf_utils import process_grad, norm_grad, norm_grad_sparse
class BaseFedarated(object):
def __init__(self, params, learner, dataset):
# transfer parameters to self
for key, val in params.items(): setattr(self, key, val)
# create worker nodes
tf.reset_default_graph()
self.client_model = learner(*params['model_params'], self.q, self.inner_opt, self.seed)
self.clients = self.setup_clients(dataset, self.dynamic_lam, self.client_model)
print('{} Clients in Total'.format(len(self.clients)))
self.latest_model = copy.deepcopy(self.client_model.get_params())
# initialize system metrics
self.metrics = Metrics(self.clients, params)
def __del__(self):
self.client_model.close()
def setup_clients(self, dataset, dynamic=0, model=None):
'''instantiates clients based on given train and test data directories
Return:
list of Clients
'''
users, groups, train_data, test_data = dataset
if len(groups) == 0:
groups = [None for _ in users]
all_clients = [Client(u, g, train_data[u], test_data[u], dynamic, model) for u, g in zip(users, groups)]
return all_clients
def train_error(self):
num_samples = []
tot_correct = []
losses = []
self.client_model.set_params(self.latest_model)
for c in self.clients:
ct, cl, ns = c.train_error()
tot_correct.append(ct*1.0)
losses.append(cl * 1.0)
num_samples.append(ns)
return np.array(num_samples), np.array(tot_correct), np.array(losses)
def test(self):
'''tests self.latest_model on given clients
'''
num_samples = []
tot_correct = []
losses = []
self.client_model.set_params(self.latest_model)
for c in self.clients:
ct, cl, ns = c.test()
tot_correct.append(ct*1.0)
num_samples.append(ns)
losses.append(cl * 1.0)
return np.array(num_samples), np.array(tot_correct), np.array(losses)
def validate(self):
'''tests self.latest_model on given clients
'''
num_samples = []
tot_correct = []
self.client_model.set_params(self.latest_model)
for c in self.clients:
ct, ns = c.validate()
tot_correct.append(ct*1.0)
num_samples.append(ns)
return np.array(num_samples), np.array(tot_correct)
def test_resulting_model(self):
num_samples = []
tot_correct = []
# self.client_model.set_params(self.latest_model)
for c in self.clients:
ct, ns = c.test()
tot_correct.append(ct*1.0)
num_samples.append(ns)
ids = [c.id for c in self.clients]
groups = [c.group for c in self.clients]
return ids, groups, num_samples, tot_correct
def save(self):
pass
def select_clients(self, round, corrupt_id, num_clients=20):
'''selects num_clients clients weighted by number of samples from possible_clients
Args:
num_clients: number of clients to select; default 20
note that within function, num_clients is set to
min(num_clients, len(possible_clients))
Return:
indices: an array of indices
self.clients[]
'''
num_clients = min(num_clients, len(self.clients)) # number of selected clients per round
np.random.seed(round+4)
non_corrupt_id = np.setdiff1d(range(len(self.clients)), corrupt_id)
corrupt_fraction = len(corrupt_id) / len(self.clients)
num_selected_corrupted = int(num_clients * corrupt_fraction)
if self.sampling == 0:
indices = np.random.choice(range(len(self.clients)), num_clients, replace=False)  # uniform sampling; pk is undefined in this branch
return indices, np.asarray(self.clients)[indices]
elif self.sampling == 1:
num_samples = []
for client in self.clients:
num_samples.append(client.train_samples)
total_samples = np.sum(np.asarray(num_samples))
pk = [item * 1.0 / total_samples for item in num_samples]
indices1 = np.random.choice(corrupt_id, num_selected_corrupted, replace=False, p=np.asarray(pk)[corrupt_id] / sum(np.asarray(pk)[corrupt_id]))
indices2 = np.random.choice(non_corrupt_id, num_clients-num_selected_corrupted, replace=False, p=np.asarray(pk)[non_corrupt_id] / sum(np.asarray(pk)[non_corrupt_id]))
indices = np.concatenate((indices1, indices2))
#print(indices1, indices2)
return indices, np.asarray(self.clients)[indices]
import numpy as np
import logging
_FFMPEG_INSTALLED = True
try:
import ffmpeg
except Exception:
_FFMPEG_INSTALLED = False
logger = logging.getLogger(__name__)
def video_write(fn, images, framerate=60, vcodec="libx264"):
"""
Save list of images to a video file.
Source:
https://github.com/kkroening/ffmpeg-python/issues/246#issuecomment-520200981
Modified so that framerate is given to .input(), as suggested in the
thread, to avoid
skipping frames.
Parameters
----------
fn : string
filename
images : list or np.array
list of images to save to a video.
framerate : int
frames per second of the output video.
vcodec : string
video codec passed to ffmpeg (default "libx264").
"""
global _FFMPEG_INSTALLED
try:
if len(images) == 0:
logger.warning("Calling video_write() with empty images.")
return
if not _FFMPEG_INSTALLED:
logger.error(
"video_write(): Unable to save video, ffmpeg-python \
package required (https://github.com/kkroening/ffmpeg-python)"
)
return
if not isinstance(images, np.ndarray):
images = np.asarray(images)
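# Hypothetical usage (assumes ffmpeg-python and an ffmpeg binary are installed):
# frames = np.random.randint(0, 255, size=(60, 64, 64, 3), dtype=np.uint8)
# video_write("out.mp4", frames, framerate=30)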
# Copyright 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
class Component:
def __init__(self, name, mode_name, current_ma):
self.name = name
self.mode_name = mode_name
self.current_ma = current_ma
class Stage:
def __init__(self, delta_t_sec, components):
self.delta_t_sec = delta_t_sec
self.components = components
class Thread:
def __init__(self, name, stages):
self.name = name
self.stages = stages
self.num_stages = len(stages)
self.last_stage_change_t = 0.0
self.next_stage_change_t = self.last_stage_change_t + stages[0].delta_t_sec
self.stage_index = 0
class SolarPanel:
def __init__(self, rated_power_W, charge_efficiency=0.7, t_offset_sec = 0.0,
clouds_tau=3600.0, clouds_cover = 1.0):
self.rated_power_W = rated_power_W
self.charge_efficiency = charge_efficiency
self.t_offset_sec = t_offset_sec
self.clouds_tau = clouds_tau
self.clouds_cover = clouds_cover
self.random_walk_val = 0.0
self.last_time_s = 0.0
self.time = []
self.power_history_W = []
self.random_walk_vals = []
def calculate_power(self, t):
dt = t - self.last_time_s
if(dt > 0.0):
f = np.exp(-dt/self.clouds_tau)
self.random_walk_val = f*self.random_walk_val + np.sqrt(1.0-f**2.0) * np.random.randn()
power = self.rated_power_W*(1.0/0.65)*(np.sin(((2.0*np.pi)/86400)*(t-self.t_offset_sec)) - 0.35)
if power < 0.0:
power = 0.0
# Calculate threshold based on normal distribution
if np.abs(self.random_walk_val) > 1.0:  # assumed threshold; the original line is truncated here
    power *= (1.0 - self.clouds_cover)  # assumed attenuation by cloud cover (illustrative completion)
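# Note on the cloud model above: with f = exp(-dt/tau), the recursion
# x <- f*x + sqrt(1 - f**2)*randn() is a discrete Ornstein-Uhlenbeck process
# whose stationary variance is exactly 1 for any dt. A standalone check:
def _ou_variance_check(dt=1.0, tau=10.0, n=100000):
    f = np.exp(-dt / tau)
    x, xs = 0.0, []
    for _ in range(n):
        x = f * x + np.sqrt(1.0 - f ** 2) * np.random.randn()
        xs.append(x)
    return np.std(xs)  # ~1.0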
# coding: utf8
# !/usr/env/python
# This file has tests for the old style output writers to ensure backwards
# compatibility. All of the existing tests for output writers are kept as is.
# There are a few new ones too.
import glob
import os
import numpy as np
from terrainbento import Basic, NotCoreNodeBaselevelHandler
from terrainbento.utilities import filecmp
_TEST_OUTPUT_DIR = os.path.join(os.curdir, "output")
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def get_output_filepath(filename):
return os.path.join(_TEST_OUTPUT_DIR, filename)
def cleanup_files(searchpath):
files = glob.glob(searchpath)
for f in files:
os.remove(f)
# Some output writers
def output_writer_function_a(model):
average_elevation = np.mean(model.z[model.grid.core_nodes])
import argparse
import glob
import os
import sys
import time
from itertools import product, permutations
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.nonparametric.api as smnp
import swifter
N_PROC = 1
CHUNK = 25
MIX = False
BASE_DIR = '/home/jmcbride/Scales/Compared_data'
RAW_DIR = '/home/jmcbride/Scales/Toy_model/Data/Raw/'
PRO_DIR = '/home/jmcbride/Scales/Toy_model/Data/Processed/'
DIST_DIR = '/home/jmcbride/Scales/Toy_model/Data/None_dist/'
REAL_DIR = '/home/jmcbride/Scales/Real_scales'
TEMP_MIN = 50.
TEMP_MAX = 300.
TEMP_LOW_MARGIN = 0.50
TEMP_HI_MARGIN = 1.50
N_TRIALS = 50
ALPHA_W = 0.1
def parse_arguments():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--partabase', action='store', default='None', type=str)
parser.add_argument('-f', action='store', default='None', dest='fName', type=str)
parser.add_argument('--sample', action='store_true', default=False, dest='sample',)
return parser.parse_args()
args = parse_arguments()
def get_scale_from_pair_ints(pair_ints):
ints = [int(y) for y in pair_ints.split(';')]
return ';'.join(['0'] + [str(y) for y in np.cumsum(ints)])
def calculate_most_harmonic_neighbour(int_cents, sim_only=False, CENT_DIFF_MAX=22):
best_ratio = [1,1]
max_similarity = 0.0
cents = 0.0
for x in np.arange(1,75, dtype=float):
cent_diff = 1200. * np.log10((x + 1.) / x) / np.log10(2.)  # cents between harmonics x+1 and x (1200 * log2 of the ratio)
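# Background for the cent computation above: the distance in cents between
# frequencies f1 < f2 is 1200 * log2(f2/f1), so an octave is 1200 cents and the
# 3:2 fifth is ~702 cents:
# 1200.0 * np.log2(2.0) -> 1200.0 ; 1200.0 * np.log2(1.5) -> 701.955...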
import occultquad
import numpy as np
def s2(r, b):
if (b >= 1 + r):
return (2 * np.pi / 3)
elif (b <= r - 1):
return 0
r_ = np.array([r], dtype=float)
b_ = np.array([b], dtype=float)
flux = np.empty(1)
'''
Pull out HI properties (and/or others) from a set of point sources.
Create a distance map as a function of distance from the nearest source.
'''
import astropy.coordinates as coord
from astropy.table import Table, Column
import astropy.units as u
import astropy.constants as const
import numpy as np
from galaxies import Galaxy
import scipy.ndimage as nd
from astropy.io import fits
from spectral_cube import SpectralCube, Projection
from spectral_cube.analysis_utilities import stack_spectra
from astropy.utils.console import ProgressBar
import matplotlib.pyplot as plt
from plotting_styles import default_figure
from constants import hi_freq, hi_mass_conversion
# from paths import allfigs_path
def distance_map_from_catalogue(gal, tab, header, ra_key="RA", dec_key="Dec",
diam_key=None):
'''
Create a distance map from a set of sky location in a catalogue.
'''
if not isinstance(gal, Galaxy):
raise TypeError("gal must be a Galaxy instance.")
ra = tab[ra_key]
dec = tab[dec_key]
coords = coord.SkyCoord(ra, dec, frame='icrs', unit=(u.deg, u.deg))
# Assumes that the table column has a unit attached that Table can distinguish
if diam_key is not None:
# Assume pc. Lost units in the save table??
diams = tab[diam_key].quantity * u.pc
radii = gal.radius(header=header)
coord_map = gal.skycoord_grid(header=header)
object_mask = np.zeros_like(coord_map.ra.value, dtype=int)
# Loop through and mask points belonging at a remnant, or the nearest point
for i, co in enumerate(coords):
mask_index = np.unravel_index(coord_map.separation(co).argmin(),
object_mask.shape)
if diam_key is not None:
# Major axis diameter
diam_rad = (diams[i].to(u.pc) / gal.distance).to(u.dimensionless_unscaled).value * u.rad
diam_pix = diam_rad.to(u.deg).value / np.abs(header['CDELT2'])
# Gather all pixels with a circular region
yy, xx = np.mgrid[-(int(diam_pix)//2 + 1):int(diam_pix)//2 + 1,
-(int(diam_pix)//2 + 1):int(diam_pix)//2 + 1]
# Find all pixels within the diameter
valids = np.where(np.sqrt(yy**2 + xx**2) < diam_pix / 2.)
y_pts = valids[0] + mask_index[0]
x_pts = valids[1] + mask_index[1]
mask_index = (y_pts, x_pts)
object_mask[mask_index] = i + 1
# print(object_mask[mask_index])
# print(mask_index)
# print((object_mask > 0).sum())
dist_transf = nd.distance_transform_edt(~(object_mask > 0))
return object_mask, dist_transf
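# Tiny illustration of the distance-transform step above: True pixels in the
# object mask get distance 0 and every other pixel gets the Euclidean pixel
# distance to the nearest masked pixel.
def _distance_transform_demo():
    demo_mask = np.zeros((5, 5), dtype=bool)
    demo_mask[2, 2] = True
    return nd.distance_transform_edt(~demo_mask)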
def find_bubble_props(dist_bins, int_profile, lwidth_profile, obj_diam,
disk_height=100 * u.pc / np.cos(55.1 * u.deg),
mass_conv_factor=None):
'''
Dumb estimations of bubble properties based on integrated intensity and
line width profiles.
'''
# Define the shell radius based on the distance of the peak
arg_max = np.argmax(int_profile)
# If the centre is the peak, assume it is unresolved
if arg_max == 0:
shell_rad = obj_diam / 2.
else:
shell_rad = obj_diam / 2. + dist_bins[arg_max]
# Assume a disk scale height and check if the radius of the shell
# exceeds it
if shell_rad > disk_height:
# It has maybe broken out of the disk. Adjust volume as needed
# Subtract off caps of the sphere
vol = (4 * np.pi / 3.) * shell_rad**3 - \
(2 * np.pi / 3.) * (shell_rad - disk_height)**2 * (2 * shell_rad + disk_height)
else:
# Likely still contained within the disk
vol = (4 * np.pi / 3.) * shell_rad**3
# Awful estimations of the velocity expansion. Assume velocity dispersion
# is exactly the same...
# Don't know how to do that with any sort of logic applied, so let it be
# the dispersion in the peak bin.
v_exp = lwidth_profile[arg_max]
# Now the integ intensity. If unresolved, we don't have an estimate of the
# background. Assume the last distance bin as a background?? Otherwise take
# the larger of the innermost and outermost when resolved.
peak_int = int_profile[arg_max]
if arg_max == 0:
bkg_int = int_profile[-1]
else:
bkg_int = max(int_profile[0], int_profile[-1])
hole_mass = np.pi * shell_rad**2 * bkg_int
shell_mass = np.pi * shell_rad**2 * \
(peak_int - bkg_int)
if mass_conv_factor is not None:
hole_mass *= mass_conv_factor
shell_mass *= mass_conv_factor
# Estimate an avg volume density within the hole. Don't do this
# for unresolved holes
if arg_max == 0:
energy = np.NaN * u.erg
vol_dens = np.NaN * u.cm**-3
else:
# Chevalier 74 expansion energy formula
vol_dens = ((shell_mass / (1.4 * const.m_p)) / vol).to(u.cm**-3)
energy = 5.3e43 * vol_dens.value**1.12 * \
shell_rad.to(u.pc).value**3.12 * v_exp.to(u.km / u.s).value**1.4 * u.erg
return shell_rad, vol, v_exp, hole_mass, shell_mass, vol_dens, energy
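# Worked example of the Chevalier (1974) energy estimate used above, with
# illustrative numbers only: n0 = 0.1 cm^-3, R = 100 pc, v_exp = 20 km/s gives
# E = 5.3e43 * 0.1**1.12 * 100**3.12 * 20**1.4 erg ~ 5e50 erg.
def _chevalier_energy_demo(n0=0.1, rad_pc=100.0, v_kms=20.0):
    return 5.3e43 * n0 ** 1.12 * rad_pc ** 3.12 * v_kms ** 1.4 * u.erg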
default_figure()
# Update this for the server files (geometry should be the same though)
gal = Galaxy("M33")
gal.distance = 840 * u.kpc
hi_cube = SpectralCube.read("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.fits")
peak_vel = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.peakvels.fits"))
mom0 = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.mom0.fits"))
beam = mom0.beam
moment0_Kkm_s = beam.jtok(hi_freq).value * mom0.value / 1000.
moment0_surfdens = moment0_Kkm_s * hi_mass_conversion * (u.K * u.km / u.s) * np.cos(55.1 * u.deg)
lwidth = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.lwidth.fits"))
snr_tab = Table.read("/Volumes/Travel_Data/M33_2/MMT_SNR_catalogue_long18_combined.txt",
format='ascii')
# Also consider weighting by something like ~1/sqrt(L) to place distances
# on a common "scale"
index_mask, dist_transf = \
distance_map_from_catalogue(gal, snr_tab, hi_cube.header,
diam_key='D')
# Get all points within ~100 pc.
dist_limit = np.arange(10) * 100 * u.pc
stacked_spectra = []
lwidth_bins = []
intint_bins = []
# Pick out individual regions
num = index_mask.max()
for n in ProgressBar(range(1, num + 1)):
reg_mask = index_mask == n
dist_transf_reg = nd.distance_transform_edt(~reg_mask)
lwidth_reg = []
intint_reg = []
# Calculate avg properties within the region
lwidth_reg.append([np.nanmean(lwidth[reg_mask].value),
np.nanstd(lwidth[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
intint_reg.append([np.nanmean(moment0_surfdens[reg_mask].value),
np.nanstd(moment0_surfdens[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
for i, (low, high) in enumerate(zip(dist_limit[:-1], dist_limit[1:])):
# print("On bin {}".format(i + 1))
dist_ang_low = (low / gal.distance.to(u.pc)).value * u.rad
dist_pix_low = dist_ang_low.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_ang_high = (high / gal.distance.to(u.pc)).value * u.rad
dist_pix_high = dist_ang_high.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_mask = np.logical_and(dist_transf_reg > dist_pix_low,
dist_transf_reg <= dist_pix_high)
num_beams = dist_mask.sum() / 41.
intint_reg.append([np.nanmean(moment0_surfdens[dist_mask].value),
np.nanstd(moment0_surfdens[dist_mask].value) / np.sqrt(num_beams)])
import numpy as np
from numba import *
from numba.typed import Dict
from numba.types import bool_
import timeit
from time import process_time
@njit
def to_dependency_network(U: np.array, force_select: np.array, extended=False):
"""Form a dependency network such that the set of nodes S contains all
graphlets and graphlets s,k \in U are connected if they share a force selected node
v in their intersection. The edge weight is the full overlap, $w_{sk} = |s \cap k|$.
Parameter `extended` controls for which type of dependency is used. By default the
criteria is relaxed such that only graphlets that share force selected node(s) will be
considered.
Parameters
----------
U : weighted adjacency matrix for the dependency network
force_select : set of nodes (as numpy array) that each graphlet configuration
is required to contain
extended : by default dependency edge is added only if graphlets contain same force
selected node(s). Set True for building a network such that it takes into
account all overlapping seed nodes
Returns
-------
U_out : output graphlets as rows of an array with shape (n_out, k_max) where elements
are node indices in the adjacency matrix of the input network. Rows are
padded from the right with -1 for graphlet sizes < k_max. (Note that n_out <= n)
E_const : weighted adjacency matrix of the dependency network where nodes
represent graphlets and edges dependencies between graphlets.
Numpy array dimensions are (n_out,n_out)
fs_map : mapping from force selected nodes to graphlets, numpy array dimensions:
(n_out, n_fs)
idxs : numpy array of row indeces that can be used for selecting the rows in U_out from U
"""
n = len(U)
n_fs = len(force_select)
E_const = np.zeros((n,n))
fs_map = np.zeros((n, n_fs), dtype=bool_)
fs_idsn = Dict()
fs = set(force_select)
for k, v in zip(force_select, np.arange(n_fs)):
fs_idsn[k] = v
for i, Si in enumerate(U):
fs_Si = fs & set(Si)
if len(fs_Si) > 0:
for u in fs_Si:
fs_map[i,fs_idsn[u]] = True
for i,Si in enumerate(U):
for j,Sj in enumerate(U):
if i != j:
Si_s = set(Si)
Sj_s = set(Sj)
l_ij = len(Si_s & Sj_s) if extended else len(Si_s & Sj_s & fs)
E_const[j,i] = len(Si_s & Sj_s) if l_ij > 0 else 0
idxs = np.array([(E_const[i,:] > 0).sum() != 0
for i in range(E_const.shape[0])])
E_const = E_const[idxs,:][:,idxs]
fs_map = fs_map[idxs,:]
return U[idxs,:], E_const, fs_map, idxs
@njit
def maximal_independent_set(A: np.array) -> np.array:
"""Sample maximal independent set (numpy implementation) using a
greedy strategy.
An independent set is a set of nodes such that the subgraph
of G induced by these nodes contains no edges. A maximal
independent set is an independent set such that it is not possible
to add a new node and still get an independent set.
Parameters
----------
A : weighted adjacency matrix for the dependency network
Returns
-------
s : indices of nodes in the found maximal independent set
"""
n = A.shape[0]
D = np.array([False for _ in range(n)])
I = np.array([False for _ in range(n)])
seed_node = np.random.choice(n)
neighbors = np.where(A[seed_node,:])[0]
D[seed_node] = I[seed_node] = True
D[neighbors] = True
while np.sum(D) < n:
node = np.random.choice(np.where(D != True)[0])
I[node] = True
node_neighbors = np.where(A[node,:])[0]
D[node_neighbors] = True
D[node] = True
s = np.where(I)[0]
return s
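# Quick check of maximal_independent_set on the 4-node path graph 0-1-2-3,
# whose maximal independent sets are {0, 2}, {1, 3} and {0, 3}:
# A_demo = np.array([[0, 1, 0, 0],
#                    [1, 0, 1, 0],
#                    [0, 1, 0, 1],
#                    [0, 0, 1, 0]], dtype=np.float64)
# sorted(maximal_independent_set(A_demo))  # one of [0, 2], [1, 3] or [0, 3]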
def sample_fs_configurations(A: np.array, U, fs, fs_map, target_time=30, adaptive=True,
tol=0.99, n_min_target=10000, c_limit: int=10, verbose=False) -> list:
"""Sample maximal independent sets to determine a configuration of force selected nodes
that guarantees near-minimal overlap.
Employs adaptive enumeration, where the run length is determined by target_time
parameter (and n_min_target sets the absolute minimum requirement).
Parameters
----------
A : weighted adjacency matrix of the input network
U : input graphlets as rows of an array with shape (n, k_max) where elements
are node indices in the adjacency matrix of the input network. Rows are
padded from the right with -1 for graphlet sizes < k_max
fs : set of nodes that each graphlet configuration is required to contain
fs_map : mapping of network nodes to graphlets (see `to_dependency_network` method)
target_time : max time threshold for the run (unit in seconds)
n_min_target : minimum number of configurations that should be generated
Returns
-------
ss (list of lists): configurations
fs (np.array): updated set of force selected nodes
Notes
-----
An independent set is a set of nodes such that the subgraph
of G induced by these nodes contains no edges. A maximal
independent set is an independent set such that it is not possible
to add a new node and still get an independent set.
"""
map_fs = lambda x: np.unique(np.where(fs_map[x,:])[1])
t = timeit.timeit(lambda: maximal_independent_set(A), number=100)
n_sample = np.max([np.int32(100 / t), 1000])
rng = np.random.default_rng()
u_sel = np.arange(U.shape[0])
n_fsa = len(fs)
if verbose:
print(':: Sampling configurations of s-nodes for force selection')
print(':: --> Targeting {:.0f}s, threshold set @ {:.2f}%'
.format(target_time, tol*100))
ls_exs = []
removed = []
enums = set()
c = np.zeros(n_fsa, dtype=int)
i = n_ss0 = exs0 = i0 = i0a = c_stop = 0
t0 = pt0 = process_time()
while pt0 - t0 < target_time or len(enums) < n_min_target:
for _ in range(n_sample):
s = maximal_independent_set(A)
fs_idxs = map_fs(s)
c[fs_idxs] += 1
i+=1
l_so = len(set(fs_idxs))
if l_so == len(fs):
s_prime = u_sel[s]
enums.add(frozenset(s_prime))
else:
ls_exs.append(l_so)
if verbose:
pt0, n_ss0, i0a = __output_stats(fs, enums, i, i0a, c, t0, pt0, n_ss0)
# EVALUATE
c_stop = c_stop + 1 if len(enums) == n_ss0 else 0
if c_stop == c_limit:
if verbose:
print(f'Run terminated due to {c_limit} successive empty batches.')
break
exs_frac = (len(ls_exs)-exs0) / (i - i0)
if exs_frac > tol:
if not adaptive:
raise Exception('Configurations that satisfy the complete set of ' \
'force selected nodes appear too infrequently. ' \
'Either remove the problematic nodes or enable '\
'adaptive running mode by setting `adaptive=True`.')
fs, c, fs_map, A, u_sel, removed = __drop(fs, c, fs_map, A, u_sel,
removed, verbose)
i0 = i
exs0 = len(ls_exs)
enums = set()
c_stop = 0
if verbose:
print(':: Discarded {:.2f}% of the configurations'.format(100*exs_frac))
tdelta = process_time() - t0
if verbose:
print(':: GENERATION STATS:\n* {} generated\n* {} accepted\n' \
'* {} discarded\n* {:.2f}% (+/-) {:.2f} of the FS nodes included on avg ' \
'\n* elapsed time: {:.2f}s.'.format(i,i-len(ls_exs),len(ls_exs),
np.mean(ls_exs) / n_fsa*100,
np.std(ls_exs) / n_fsa*100,
tdelta))
print(':: Found {} unique configurations, {} ({:.0f}%) force selected nodes were ' \
'removed: {}'.format(len(enums),len(removed),len(removed)/n_fsa*100,removed))
ss = [[v for v in e] for e in enums]
return ss, fs
def __drop(fs, c, fs_map, A, u_sel, removed, verbose):
"""Drop the worst performing node in the current force selected set by
occurrence in the generated configurations.
"""
c_argsort = np.argsort(c)
drop = c_argsort[0]
keep = c_argsort[1:]
if verbose:
print(':: Node', fs[drop],'removed.')
idxs = np.unique(np.where(fs_map[:, keep])[0])
A = A[idxs,:][:,idxs]
fs_map = fs_map[idxs,:][:,keep]
c = c[keep]
removed.append(fs[drop])
fs = fs[keep]
u_sel = u_sel[idxs]
return fs, c, fs_map, A, u_sel, removed
def __output_stats(fs, enums, i, i0a, c, t0, pt0, n_ss0):
"""Print iteration statistics in the `sample_fs_configurations` method.
"""
if i % 10 == 0:
pt1 = process_time()
if pt1 - pt0 > 10.0:
n_ss = len(enums)
rate_1_sec = np.round((i-i0a) / (pt1-pt0), 2)
rate_2_sec = np.round((n_ss - n_ss0) / (pt1 - pt0), 2)
print(':: sampling at {}/s, accepting at {}/s'.format(rate_1_sec, rate_2_sec))  # assumed use of the rates; source truncated here
return pt1, n_ss, i  # assumed return to match the caller's unpacking of (pt0, n_ss0, i0a)
# this is the python library created for using BigGAN in evolution.
import sys
from os.path import join
sys.path.append("C:/Users/zhanq/OneDrive - Washington University in St. Louis/GitHub/pytorch-pretrained-BigGAN")
# sys.path.append("E:\Github_Projects\pytorch-pretrained-BigGAN")
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, one_hot_from_int, truncated_noise_sample, convert_to_images)
import torch
import numpy as np
import matplotlib.pylab as plt
#%%
#%%
from numpy.linalg import norm
def orthonorm(ref, vec2):
res = vec2 - vec2 @ ref.T * ref / norm(ref, axis=1)**2
return res / norm(res) * norm(ref)
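# Sanity check of orthonorm (illustrative, standalone): the result is
# orthogonal to ref and rescaled to ref's norm.
def _orthonorm_check(dim=128):
    ref, vec = np.random.randn(1, dim), np.random.randn(1, dim)
    res = orthonorm(ref, vec)
    return float(ref @ res.T), norm(res) - norm(ref)  # ~ (0.0, 0.0)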
#%%
from scipy.stats import truncnorm
def convert_to_images_np(obj, scale=1.0):
""" Convert an output tensor from BigGAN in a list of images.
Params:
obj: tensor or numpy array of shape (batch_size, channels, height, width)
Output:
list of Pillow Images of size (height, width)
"""
try:
from PIL import Image
except ImportError:
raise ImportError("Please install Pillow to use images: pip install Pillow")
if not isinstance(obj, np.ndarray):
obj = obj.detach().numpy()
obj = obj.transpose((0, 2, 3, 1))
obj = np.clip(((obj + 1) / 2.0) * scale, 0, scale)
img = []
for i, out in enumerate(obj):
img.append(out)
return img
def truncated_noise_sample(batch_size=1, dim_z=128, truncation=1., seed=None):
""" Create a truncated noise vector.
Params:
batch_size: batch size.
dim_z: dimension of z
truncation: truncation value to use
seed: seed for the random generator
Output:
array of shape (batch_size, dim_z)
"""
state = None if seed is None else np.random.RandomState(seed)
values = truncnorm.rvs(-2, 2, size=(batch_size, dim_z), random_state=state).astype(np.float32)
return truncation * values
#%%
# Load pre-trained model tokenizer (vocabulary)
model = BigGAN.from_pretrained('biggan-deep-256')
model.to('cuda')
def BigGAN_render(class_vector, noise_vector, truncation):
if class_vector.shape[0] == 1:
class_vector = np.tile(class_vector, [noise_vector.shape[0], 1])
if noise_vector.shape[0] == 1:
noise_vector = np.tile(noise_vector, [class_vector.shape[0], 1])
class_vector = torch.from_numpy(class_vector.astype(np.float32)).to('cuda')
noise_vector = torch.from_numpy(noise_vector.astype(np.float32)).to('cuda')
with torch.no_grad():
output = model(noise_vector, class_vector, truncation)
imgs = convert_to_images(output.cpu())
return imgs
def BigGAN_embed_render(embed_vecs, noise_vecs=None, truncation=0.7, scale=255.0, batch=5):
if embed_vecs.shape[1] == 256:
input_vecs = torch.from_numpy(embed_vecs)
elif embed_vecs.shape[1] == 128:
if noise_vecs is None:
embed_vecs = torch.from_numpy(embed_vecs)
input_vecs = torch.cat((torch.zeros_like(embed_vecs), embed_vecs), dim=1)
else:
assert noise_vecs.shape[1] == 128
if noise_vecs.shape[0] == embed_vecs[0]:
input_vecs = torch.cat((torch.from_numpy(noise_vecs), torch.from_numpy(embed_vecs)), dim=1)
else:
assert noise_vecs.shape[0] == 1
noise_vecs = np.tile(noise_vecs, [embed_vecs.shape[0], 1])
input_vecs = torch.cat((torch.from_numpy(noise_vecs), torch.from_numpy(embed_vecs)), dim=1)
sample_n = input_vecs.shape[0]
imgs_all = []
csr = 0
csr_end = 0
while csr_end < sample_n:
csr_end = min(csr + batch, sample_n)
with torch.no_grad():
output = model.generator(input_vecs[csr:csr_end, :].float().cuda(), truncation).cpu()
# imgs = convert_to_images(output.cpu())
# imgs = [np.array(img).astype(np.float64) / 255 * scale for img in imgs]
imgs = convert_to_images_np(output, scale)
imgs_all.extend(imgs)
csr = csr_end
return imgs_all
if __name__=="__main__":
# %%
# Prepare a input
batch_size = 3
truncation = 0.5
class_vector = one_hot_from_names(['soap bubble', 'coffee', 'mushroom'], batch_size=batch_size)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=batch_size)
#noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1)
# All in tensors
#noise_vector = torch.from_numpy(np.ones([3, 128]).astype(np.float32)) #
noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)
# If you have a GPU, put everything on cuda
noise_vector = noise_vector.to('cuda')
class_vector = class_vector.to('cuda')
model.to('cuda')
# Generate an image
with torch.no_grad():
output = model(noise_vector, class_vector, truncation)
imgs = convert_to_images(output.cpu())
#%% 1d interpolation
truncation = 0.7
batch_size = 11
class_vector = one_hot_from_names(['mushroom']*batch_size, batch_size=1)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1)
scale_vec = np.arange(-1, 1.1, 0.2)
noise_vec_scale = scale_vec.reshape([-1, 1])*noise_vector
imgs = BigGAN_render(class_vector, noise_vec_scale, truncation=truncation)
#%
figh = plt.figure(figsize=[25, 3])
gs = figh.add_gridspec(1, len(imgs)) # 1d interpolation
for i, img in enumerate(imgs):
plt.subplot(gs[i])
plt.imshow(img)
plt.axis('off')
plt.title("{0:.2f}".format(scale_vec[i]), fontsize=15,)
plt.show()
#%%
savedir = r"C:\Users\zhanq\OneDrive - Washington University in St. Louis\Generator_Testing\BigGAN256"
truncation = 0.7
# batch_size = 11
classname = 'goldfish'
class_vector = one_hot_from_names([classname], batch_size=1)
#%% 1d interpolation and save
truncation = 0.7
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1)
scale_UL = 1; scale_BL = -scale_UL; sample_n = 11
scale_vec = np.linspace(scale_BL, scale_UL, sample_n)
# scale_vec = np.linspace(-2.5, -0.9, sample_n)
noise_vec_scale = scale_vec.reshape([-1, 1])*noise_vector
imgs = BigGAN_render(class_vector, noise_vec_scale, truncation=truncation)
figh = plt.figure(figsize=[25, 3])
gs = figh.add_gridspec(1, len(imgs)) # 1d interpolation
for i, img in enumerate(imgs):
plt.subplot(gs[i])
plt.imshow(img)
plt.axis('off')
plt.title("{0:.1f}".format(scale_vec[i]), fontsize=15,)
plt.savefig(join(savedir, "%s_UL%.1f_BL%.1f_trunc%.1f_%04d.png" % (classname, scale_UL, scale_BL, truncation, np.random.randint(10000))))
plt.show()
#%% 2d linear interpolation through center
savedir = r"C:\Users\zhanq\OneDrive - Washington University in St. Louis\Generator_Testing\BigGAN256"
truncation = 0.7
# batch_size = 11
classname = 'goldfish'
class_vector = one_hot_from_names([classname], batch_size=1)
truncation = 0.7
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=2)
vec1 = noise_vector[0:1, :]
vec2 = orthonorm(vec1, noise_vector[1:2, :])
xlim = (-1, 1)
ylim = (-1, 1); sample_n = 11
x_scale_vec = np.linspace(*xlim, sample_n)
y_scale_vec = np.linspace(*ylim, sample_n)
# scale_vec = np.linspace(-2.5, -0.9, sample_n)
imgs = []
for ydeg in y_scale_vec:
noise_vec_scale = x_scale_vec[:, np.newaxis] * vec1 + ydeg * vec2
img_row = BigGAN_render(class_vector, noise_vec_scale, truncation=truncation)
imgs.append(img_row)
#%
figh = plt.figure(figsize=[25, 25])
gs = figh.add_gridspec(len(y_scale_vec), len(x_scale_vec)) # 2d interpolation
for i, img_row in enumerate(imgs):
for j, img in enumerate(img_row):
plt.subplot(gs[i, j])
plt.imshow(img)
plt.axis('off')
plt.title("%.1f, %.1f"%(x_scale_vec[i], y_scale_vec[j]), fontsize=15,)
plt.tight_layout()
plt.savefig(join(savedir, "%s_[%.1f-%.1f]_[%.1f-%.1f]_trunc%.1f_%04d.png" % (classname, *xlim, *ylim, truncation, np.random.randint(10000))))
plt.show()
#%% 2d interpolation in sphere
savedir = r"C:\Users\zhanq\OneDrive - Washington University in St. Louis\Generator_Testing\BigGAN256"
truncation = 0.4
# batch_size = 11
classname = 'goldfish'
class_vector = one_hot_from_names([classname], batch_size=1)
truncation = 0.4
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=3)
vec1 = noise_vector[0:1, :]
vec2 = orthonorm(vec1, noise_vector[1:2, :])
vec3 = orthonorm(vec2, noise_vector[2:3, :])
vec3 = orthonorm(vec1, vec3)
sample_n = 11
phi_scale_vec = np.linspace(-90, 90, sample_n)
import numpy as np
from pkg.dist import dist
def weights_c(X, C):
w = np.zeros(C.shape[0])
for i in range(len(X)):
if X[i] not in C:
ind = np.argmin([dist(X[i],c,0) for c in C])
w[ind] +=1
return w
def weighted_kmeans(Cen,w,k,random_state=42):
rs = np.random.RandomState(random_state)
ran = rs.randint(len(Cen))
C1 = Cen[ran,:].reshape(1,Cen.shape[1])
X1 = np.delete(Cen, ran, 0)
w = np.delete(w, ran)
import copy
import glob
import os
import time
import pickle
import matchingpennies
from collections import deque
import gym
import gym_compete
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.algo import gail
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate, evaluate_multi, eval_movie
from stable_baselines3.common.running_mean_std import RunningMeanStd
class MpPolicy:
def __init__(self):
self.vals = np.array([1.1,0.1])
self.counts = np.array([1,1])
self.eps = .2
#self.vals = np.zeros(2) + 1e-10
def update(self, obs, action, reward):
if isinstance(action, list):
for a, r in zip(action, reward):
self.vals[a] += r
self.counts[a] += 1
else:
self.vals[action] += reward
self.counts[action] += 1
@property
def probs(self):
probs = np.exp(self.vals/self.counts) / np.exp(self.vals/self.counts).sum()
return probs
def act(self, obs):
probs = self.probs
actions = []
if isinstance(obs, list):
for _ in obs:
if np.random.rand() < self.eps:
actions.append(np.random.choice([0,1]))
else:
actions.append(np.random.choice([0,1],p=probs))
else:
actions.append(np.random.choice([0,1],p=probs))
return np.array(actions)
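# A short self-play sketch of the exponentiated-average bandit policy above
# (illustrative, not part of the original training loop): two MpPolicy players
# repeatedly play matching pennies with +1/-1 payoffs.
def mp_selfplay_sketch(rounds=1000):
    p0, p1 = MpPolicy(), MpPolicy()
    for _ in range(rounds):
        a0 = int(p0.act(None)[0])
        a1 = int(p1.act(None)[0])
        r0 = 1.0 if a0 == a1 else -1.0
        p0.update(None, a0, r0)
        p1.update(None, a1, -r0)
    return p0.probs, p1.probs  # both should drift toward ~[0.5, 0.5]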
class Agent:
def __init__(self, playerid):
self.playerid = playerid
self.policies = []
def add_policy(self, policy):
# TODO: need to add a wrapper that adds the obs_rms to each actor as we run it...
self.policies.append(policy)
self.weights = np.arange(1, len(self.policies)+1).astype(float)
self.weights[-1] *= 2
self.weights /= self.weights.sum()
def sample_policy(self):
ind = np.random.choice(range(len(self.policies)), p=self.weights)
return self.policies[ind]
def init(self, num_envs):
self.env_policies = []
for _ in range(num_envs):
policy = self.sample_policy()
self.env_policies.append(policy)
def new_policy(self, ind):
policy = self.sample_policy()
self.env_policies[ind] = policy
def act(self, obs, *args, **kwargs):
values = []
actions = []
logprobs = []
rhs = []
with torch.no_grad():
for i, o in enumerate(obs):
policy = self.env_policies[i]
action = policy.act(o)
actions.append(action)
actions = np.array(actions)
#if len(actions) > 1:
# actions = np.concatenate(actions)
return actions
class EnvWrapper:
def __init__(self, env_name, num_envs):
self.envs = []
for _ in range(num_envs):
self.envs.append(gym.make(env_name))
def step(self, actions):
steps = []
for i, a in enumerate(actions):
steps.append(self.envs[i].step(a))
obs = [s[0] for s in steps]
r = [s[1] for s in steps]
dones = [s[2] for s in steps]
infos = [s[3] for s in steps]
return obs, np.array(r).T, dones, infos
def reset(self):
obs = []
for env in self.envs:
obs.append(env.reset())
return obs
def main():
args = get_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
envs = EnvWrapper(args.env_name, args.num_processes)
#envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
# args.gamma, args.log_dir, device, False, dense=args.dense)
envs.reset()
agent0 = MpPolicy()
agent1 = MpPolicy()
player0_policies = []
player1_policies = []
obs_rms_cache = []
episode_rewards0 = deque(maxlen=10)
episode_rewards1 = deque(maxlen=10)
train_stats = []
eval_p0 = []
eval_p1 = []
cache_freq = args.cache_freq
learn_agent0 = agent0
old_agent0 = Agent(0)
old_agent0.add_policy(copy.deepcopy(agent0))
old_agent0.init(args.num_processes//2)
learn_agent1 = agent1
old_agent1 = Agent(1)
old_agent1.add_policy(copy.deepcopy(agent1))
old_agent1.init(args.num_processes//2)
p0numep = 0
p1numep = 0
#num_updates = int(
# args.num_env_steps) // args.num_steps // args.num_processes
num_updates = 50
for j in range(num_updates):
eps = -1
p0draws, p1draws = 0, 0
w0, w1 = 0, 0
epoch_p0ep, epoch_p1ep = 0, 0
while eps < args.num_episodes:
start = time.time()
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
action0a = agent0.act([None for _ in range(args.num_processes//2)])
action0b = old_agent0.act([None for _ in range(args.num_processes//2)])
action1a = old_agent1.act([None for _ in range(args.num_processes//2)])
action1b = agent1.act([None for _ in range(args.num_processes//2)])
action0 = np.concatenate((action0a.squeeze(),action0b.squeeze()))
action1 = np.concatenate((action1a.squeeze(),action1b.squeeze()))
#action0 = torch.tensor(action0)[:,None]
#action1 = torch.tensor(action1)[:,None]
# observes the reward and next observation
obs, reward, done, infos = envs.step(zip(action0, action1))
envs.reset()
agent0.update(None, action0, reward[0])
agent1.update(None, action1, reward[1])
for i, info in enumerate(infos[:args.num_processes//2]):
d = True
if 'episode' in info[0].keys():
old_agent1.new_policy(i)
epoch_p0ep += 1
if "winner" in info[0].keys():
d = False
w0 += 1
if "winner" in info[1].keys():
d = False
eps += 1
episode_rewards0.append(info[0]['episode']['r'])
if d:
p0draws += 1
p0numep += 1
if p0numep % cache_freq == 0:
print(f"ADDING POLICY !! Current length {len(old_agent0.policies)}")
old_agent0.add_policy(copy.deepcopy(agent0))
for i, info in enumerate(infos[args.num_processes//2:]):
d = True
if 'episode' in info[1].keys():
old_agent0.new_policy(i)
epoch_p1ep += 1
if "winner" in info[0].keys():
d = False
if "winner" in info[1].keys():
d = False
w1 += 1
episode_rewards1.append(info[1]['episode']['r'])
if d:
p1draws += 1
p1numep += 1
if p1numep % cache_freq == 0:
old_agent1.add_policy(copy.deepcopy(agent1))
if j % args.log_interval == 0 and len(episode_rewards0) > 1:
total_num_steps = args.num_steps * args.num_processes
end = time.time()
print("=============================================================================================")
print(f"Updates {j} player0 rewards { | np.mean(episode_rewards0) | numpy.mean |
# module import
import gc
import os
import copy
import random
import platform
import numpy as np
import pickle as p
import pandas as pd
import multiprocessing as mp
from numpy.matlib import repmat
# scipy module imports
from scipy.stats import norm, linregress
from scipy.spatial.distance import *
from scipy.interpolate import PchipInterpolator as pchip
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.signal import periodogram, hamming, boxcar, find_peaks
# sklearn module imports
from sklearn.linear_model import LinearRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# statsmodels module imports
from statsmodels.nonparametric.smoothers_lowess import lowess
# pyqt5 module import
from PyQt5.QtCore import QThread, pyqtSignal
# custom module imports
import analysis_guis.common_func as cf
import analysis_guis.calc_functions as cfcn
import analysis_guis.rotational_analysis as rot
from analysis_guis.dialogs.rotation_filter import RotationFilteredData
from analysis_guis.cluster_read import ClusterRead
from probez.spike_handling import spike_io
# other parameters
dcopy = copy.deepcopy
default_dir_file = os.path.join(os.getcwd(), 'default_dir.p')
interp_arr = lambda xi, y: np.vstack([interp1d(np.linspace(0, 1, len(x)), x, kind='nearest')(xi) for x in y])
cell_perm_ind = lambda n_cell_tot, n_cell: np.sort(np.random.permutation(n_cell_tot)[:n_cell])
set_sf_cell_perm = lambda spd_sf, n_pool, n_cell: [x[:, :, cell_perm_ind(n_pool, n_cell)] for x in spd_sf]
grp_expt_indices = lambda i_expt0: [np.where(i_expt0 == i)[0] for i in np.unique(i_expt0)]
# lambda function declarations
lin_func = lambda x, a: a * x
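# helper notes: interp_arr resamples each row of y onto the common grid xi via
# nearest-neighbour interpolation; cell_perm_ind draws a sorted random subset of
# n_cell indices from n_cell_tot; grp_expt_indices groups trial indices by their
# experiment index; lin_func is a zero-intercept linear model for curve fitting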
########################################################################################################################
########################################################################################################################
class WorkerThread(QThread):
# creates the signal object
work_started = pyqtSignal()
work_progress = pyqtSignal(str, float)
work_finished = pyqtSignal(object)
work_error = pyqtSignal(str, str)
work_plot = pyqtSignal(object)
def __init__(self, parent=None, main_gui=None):
# creates the worker object
super(WorkerThread, self).__init__(parent)
self.update_pbar = True
self.is_running = False
self.forced_quit = False
self.sub_job = None
self.is_ok = True
self.data = None
# other initialisations
self.main_gui = main_gui
self.thread_job_primary = None
self.thread_job_secondary = None
self.thread_job_para = None
def set_worker_func_type(self, thread_job_primary, thread_job_secondary=None, thread_job_para=None):
'''
Sets the primary/secondary job types and the parameter list used by run().

:param thread_job_primary: primary job type (e.g. 'run_calc_func', 'load_data_files')
:param thread_job_secondary: secondary job type (the name of the calculation function)
:param thread_job_para: list of parameters passed through to the job
'''
# updates the worker primary/secondary job type and parameters
self.thread_job_primary = thread_job_primary
self.thread_job_secondary = thread_job_secondary
self.thread_job_para = thread_job_para
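# example usage (a hypothetical caller sketch; assumes a main GUI object and the
# calculation/plotting parameter dictionaries have been set up elsewhere):
#   worker = WorkerThread(main_gui=gui)
#   worker.set_worker_func_type('run_calc_func', 'Rotation Direction LDA',
#                               [calc_para, plot_para, data, pool, g_para])
#   worker.start()    # QThread.start() invokes run() on the worker thread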
def run(self):
'''
Runs the worker job (invoked via QThread.start()) based on the primary and
secondary job types previously set by set_worker_func_type().
'''
# initialisations
w_prog, w_err = self.work_progress, self.work_error
# updates the running/forced quit flags
self.is_running = True
self.forced_quit = False
self.is_ok = True
# updates the running parameter and enables the progress group parameters
self.work_started.emit()
# runs the job based on the type
thread_data = None
if self.thread_job_primary == 'init_data_file':
# case is initialising the data file
self.init_cluster_data()
elif self.thread_job_primary == 'init_pool_object':
# case is initialising the pool worker object
thread_data = self.init_pool_worker()
##################################
#### DATA I/O FUNCTIONS ####
##################################
elif self.thread_job_primary == 'load_data_files':
# case is loading the data files
thread_data = self.load_data_file()
elif self.thread_job_primary == 'save_multi_expt_file':
# retrieves the parameters
data, out_info = self.thread_job_para[0], self.thread_job_para[1]
# case is saving the multi-experiment data file
thread_data = self.save_multi_expt_file(data, out_info)
elif self.thread_job_primary == 'save_multi_comp_file':
# retrieves the parameters
data, out_info = self.thread_job_para[0], self.thread_job_para[1]
# case is saving the multi-comparison data file
thread_data = self.save_multi_comp_file(data, out_info)
elif self.thread_job_primary == 'run_calc_func':
# case is the calculation functions
calc_para, plot_para = self.thread_job_para[0], self.thread_job_para[1]
data, pool, g_para = self.thread_job_para[2], self.thread_job_para[3], self.thread_job_para[4]
################################################
#### CLUSTER CLASSIFICATION FUNCTIONS ####
################################################
if self.thread_job_secondary == 'Fixed/Free Cluster Matching':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['clust'])
# case is determining the cluster matches
self.det_cluster_matches(data, calc_para, w_prog)
elif self.thread_job_secondary == 'Cluster Cross-Correlogram':
# case is the cc-gram type determinations
thread_data = self.calc_ccgram_types(calc_para, data.cluster)
######################################
#### AHV ANALYSIS FUNCTIONS ####
######################################
elif ' (Fixed)' in self.thread_job_secondary or \
(self.thread_job_secondary == 'Correlation Significance Overlap'):
# ensures the smoothing window is an odd integer (if smoothing)
if calc_para['is_smooth']:
if calc_para['n_smooth'] % 2 != 1:
# if not, then output an error message to screen
e_str = 'The median smoothing filter window span must be an odd integer.'
w_err.emit(e_str, 'Incorrect Smoothing Window Span')
# sets the error flag and exits the function
self.is_ok = False
self.work_finished.emit(thread_data)
return
# initialises the rotation filter class object (if not already set)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel', 'vel_sf_fix'], other_para=False)
# calculates the shuffled kinematic spiking frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, dcopy(calc_para), w_prog, roc_calc=False)
cfcn.calc_shuffled_kinematic_spike_freq(data, dcopy(calc_para), w_prog)
# runs any specific additional function
fit_func = ['Correlation Comparison (Fixed)',
'Correlation Fit Parameters (Fixed)',
'Individual Cell Correlation (Fixed)']
if self.thread_job_secondary in fit_func:
# case is the correlation fit parameters
self.calc_corr_fit_para(data, plot_para, dcopy(calc_para), w_prog)
elif (' (Freely Moving)' in self.thread_job_secondary):
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel_sf_free'], other_para=False)
# updates the bin velocity
data.rotation.vel_bin_corr = calc_para['vel_bin']
elif 'Fixed/Free Spiking Correlation' in self.thread_job_secondary:
# determines if the freely moving data file has been loaded
if not hasattr(data.externd, 'free_data'):
# if the data-file has not been loaded then output an error to screen and exit
e_str = 'The freely moving spiking frequency/statistics data file must be loaded ' \
'before being able to run this function.\n\nPlease load this data file and try again.'
w_err.emit(e_str, 'Freely Moving Data Missing?')
# exits the function with an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['ff_corr', 'vel'], other_para=False)
# calculates the shuffled kinematic spiking frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False, use_raw=True)
# calculates the fixed/free correlations (if not already set)
if not data.comp.ff_corr.is_set:
self.calc_fix_free_correlation(data, calc_para, w_prog)
################################################
#### FREELY MOVING ANALYSIS FUNCTIONS ####
################################################
elif self.thread_job_secondary == 'Freely Moving Cell Fit Residual':
# ensures the calculation fields are set before computing the cell fit residuals
self.calc_cell_fit_residual(data, calc_para, w_prog)
######################################
#### EYE TRACKING FUNCTIONS ####
######################################
elif self.thread_job_secondary in ['Eye Movement Event Signals']:
# check to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])
# calculates the eye-tracking metrics (if not calculated)
if len(data.externd.eye_track.t_evnt) == 0:
self.calc_eye_track_metrics(data, calc_para, w_prog)
elif 'Eye Movement Correlation' in self.thread_job_secondary:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['eye_track'])
# calculates the eye-tracking metrics (if not calculated)
if len(data.externd.eye_track.t_evnt) == 0:
self.calc_eye_track_metrics(data, calc_para, w_prog)
# calculates the eye-tracking metrics
if len(data.externd.eye_track.t_sp_h) == 0:
self.calc_eye_track_corr(data, calc_para, w_prog)
######################################
#### ROC ANALYSIS FUNCTIONS ####
######################################
elif self.thread_job_secondary == 'Direction ROC Curves (Single Cell)':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])
# case is the shuffled cluster distances
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 100.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Direction ROC Curves (Whole Experiment)':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, False, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
elif self.thread_job_secondary in ['Direction ROC AUC Histograms',
'Direction ROC Spiking Rate Heatmap']:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 100., True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif 'Velocity ROC Curves' in self.thread_job_secondary:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)
self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 50.)
elif self.thread_job_secondary == 'Velocity ROC Significance':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=True)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog)
# calculates the kinematic roc curves and their significance
self.calc_kinematic_roc_curves(data, pool, calc_para, g_para, 0.)
self.calc_kinematic_roc_significance(data, calc_para, g_para)
elif self.thread_job_secondary == 'Condition ROC Curve Comparison':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
elif self.thread_job_secondary == 'Direction ROC Significance':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase'])
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, calc_para, plot_para, g_para, True, 33.,
force_black_calc=True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 66.)
self.calc_phase_roc_significance(calc_para, g_para, data, pool, 100.)
if cf.det_valid_vis_expt(data, True):
if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
###############################################
#### COMBINED ANALYSIS LDA FUNCTIONS ####
###############################################
elif self.thread_job_secondary == 'Rotation/Visual Stimuli Response Statistics':
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, calc_para, 50.)
# calculates the direction/selection group types
if not self.calc_dirsel_group_types(data, pool, calc_para, plot_para, g_para):
self.is_ok = False
self.work_finished.emit(thread_data)
elif self.thread_job_secondary == 'Combined Direction ROC Curves (Whole Experiment)':
# checks that the conditions are correct for running the function
if not self.check_combined_conditions(calc_para, plot_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# initialises the rotational filter (if not initialised already)
if plot_para['rot_filt'] is None:
plot_para['rot_filt'] = cf.init_rotation_filter_data(False)
# adds motordrifting (if the visual expt type)
_plot_para, _calc_para = dcopy(plot_para), dcopy(calc_para)
if calc_para['vis_expt_type'] == 'MotorDrifting':
_plot_para['rot_filt']['t_type'].append('MotorDrifting')
# resets the flags to use the full rotation/visual phases
_calc_para['use_full_rot'], _calc_para['use_full_vis'] = True, True
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(data, pool, _calc_para, _plot_para, g_para, False, 33.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(data, _calc_para, 66.)
if (calc_para['vis_expt_type'] == 'UniformDrifting') and \
(calc_para['grp_stype'] != 'Wilcoxon Paired Test'):
# sets up the visual rotation filter
r_filt_v = cf.init_rotation_filter_data(False)
r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']
# retrieves the visual filter object
plot_exp_name, plot_all_expt = plot_para['plot_exp_name'], plot_para['plot_all_expt']
r_obj_vis, ind_type = cf.split_unidrift_phases(data, r_filt_v, None, plot_exp_name, plot_all_expt,
'Whole Experiment', 2.)
# calculates the full uniform-drifting curves
self.calc_ud_roc_curves(data, r_obj_vis, ind_type, 66.)
# calculates the direction selection types
if not self.calc_dirsel_group_types(data, pool, _calc_para, _plot_para, g_para):
self.is_ok = False
# calculates the partial roc curves
self.calc_partial_roc_curves(data, calc_para, plot_para, 66.)
elif self.thread_job_secondary in ['Normalised Kinematic Spiking Frequency']:
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['vel'], other_para=False)
# calculates the binned kinematic spike frequencies
cfcn.calc_binned_kinemetic_spike_freq(data, plot_para, calc_para, w_prog, roc_calc=False)
######################################################
#### DEPTH-BASED SPIKING ANALYSIS FUNCTIONS ####
######################################################
elif self.thread_job_secondary == 'Depth Spiking Rate Comparison':
# make a copy of the plotting/calculation parameters
_plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth
_plot_para['plot_exp_name'] = None
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# reduces the data clusters to only include the RSPd/RSPg cells
_data = cfcn.get_rsp_reduced_clusters(data)
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, True,
33., r_data=r_data, force_black_calc=True):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)
############################################
#### SPIKING FREQUENCY CALCULATION ####
############################################
# initialisations
r_filt = _plot_para['rot_filt']
r_data.ch_depth, r_data.ch_region, r_data.ch_layer = \
cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# rotation filtered object calculation
r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,
t_ofs=t_ofs, t_phase=t_phase)
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)
r_data.plt, r_data.stats, r_data.ind, r_data.r_filt = s_plt, sf_stats, ind, dcopy(r_filt)
elif self.thread_job_secondary == 'Depth Spiking Rate Comparison (Multi-Sensory)':
# checks that the conditions are correct for running the function
if not self.check_combined_conditions(calc_para, plot_para):
self.is_ok = False
self.work_finished.emit(thread_data)
return
else:
# otherwise, make a copy of the plotting/calculation parameters
_plot_para, _calc_para, r_data = dcopy(plot_para), dcopy(calc_para), data.depth
_plot_para['plot_exp_name'], r_filt = None, _plot_para['rot_filt']
t_ofs, t_phase = cfcn.get_rot_phase_offsets(calc_para)
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['condition', 'phase', 'visual'])
# adds motordrifting (if it is the visual expt type)
if calc_para['vis_expt_type'] == 'MotorDrifting':
_plot_para['rot_filt']['t_type'].append('MotorDrifting')
# reduces the data clusters to only include the RSPd/RSPg cells
_data = cfcn.get_rsp_reduced_clusters(data)
# calculates the phase roc-curves for each cell
if not self.calc_cond_roc_curves(_data, pool, _calc_para, _plot_para, g_para, False, 33., r_data=r_data):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# calculates the phase roc curve/significance values
self.calc_phase_roc_curves(_data, _calc_para, 66., r_data=r_data)
if (calc_para['vis_expt_type'] == 'UniformDrifting'):
# sets up the visual rotation filter
r_filt_v = cf.init_rotation_filter_data(False)
r_filt_v['t_type'], r_filt_v['is_ud'], r_filt_v['t_cycle'] = ['UniformDrifting'], [True], ['15']
# retrieves the visual filter object
r_obj_vis, ind_type = cf.split_unidrift_phases(_data, r_filt_v, None, None, True,
'Whole Experiment', 2., t_phase, t_ofs)
# calculates the full uniform-drifting curves
self.calc_ud_roc_curves(_data, r_obj_vis, ind_type, 66., r_data=r_data)
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0, sp_f = cf.calc_phase_spike_freq(r_obj_vis)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_vis, sp_f0, sp_f, ind_type, 2)
r_data.plt_vms, r_data.stats_vms, r_data.ind_vms = s_plt, sf_stats, ind
r_data.r_filt_vms = dcopy(r_filt_v)
else:
# resets the uniform drifting fields
r_data.plt_vms, r_data.stats_vms, r_data.ind_vms, r_data.r_filt_vms = None, None, None, None
############################################
#### SPIKING FREQUENCY CALCULATION ####
############################################
# rotation filtered object calculation
r_obj_rot = RotationFilteredData(_data, r_filt, None, None, True, 'Whole Experiment', False,
t_phase=t_phase, t_ofs=t_ofs)
r_data.ch_depth_ms, r_data.ch_region_ms, r_data.ch_layer_ms = \
cfcn.get_channel_depths_tt(_data._cluster, r_filt['t_type'])
# calculates the individual trial/mean spiking rates and sets up the plot/stats arrays
sp_f0_rot, sp_f_rot = cf.calc_phase_spike_freq(r_obj_rot)
s_plt, _, sf_stats, ind = cf.setup_spike_freq_plot_arrays(r_obj_rot, sp_f0_rot, sp_f_rot, None, 3)
r_data.plt_rms, r_data.stats_rms, r_data.ind_rms = s_plt, sf_stats, ind
r_data.r_filt_rms = dcopy(r_filt)
##########################################################
#### ROTATION DISCRIMINATION ANALYSIS FUNCTIONS ####
##########################################################
elif self.thread_job_secondary == 'Rotation Direction LDA':
# if the solver parameters have not been set, then initialise them
d_data = data.discrim.dir
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)
# sets up the lda values
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=d_data, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Temporal Duration/Offset LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)
# if the temporal data parameters have changed/has not been initialised then calculate the values
if data.discrim.temp.lda is None:
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.temp)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.temp,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# if an update in the calculations is required, then run the temporal LDA analysis
if status == 2:
if not self.run_temporal_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Individual LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.indiv)
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# if the individual data parameters have changed/has not been initialised then calculate the values
if data.discrim.indiv.lda is None:
# runs the individual LDA
if not self.run_individual_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Shuffled LDA':
# checks to see if any parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.shuffle)
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# runs the shuffled LDA
if not self.run_shuffled_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Pooled Neuron LDA':
# resets the minimum cell count and checks if the pooled parameters have been altered
# calc_para['lda_para']['n_cell_min'] = calc_para['n_cell_min']
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.part)
# if the pooled data parameters have changed/has not been initialised then calculate the values
if data.discrim.part.lda is None:
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if not calc_para['pool_expt']:
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# elif status == 2:
# # if an update in the calculations is required, then run the rotation LDA analysis
# if not cfcn.run_rot_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
# d_data=data.discrim.dir, w_prog=w_prog):
# self.is_ok = False
# self.work_finished.emit(thread_data)
# return
# runs the partial LDA
if not self.run_pooled_lda(pool, data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Individual Cell Accuracy Filtered LDA':
# check to see if the individual LDA calculations have been performed
if data.discrim.indiv.lda is None:
# if the individual LDA has not been run, then output an error to screen
e_str = 'The Individual LDA must be run first before this analysis can be performed'
w_err.emit(e_str, 'Missing Individual LDA Data')
# sets the ok flag to false and exit the function
self.is_ok = False
self.work_finished.emit(thread_data)
return
# makes a copy of the calculation parameters and sets the comparison conditions
_calc_para = dcopy(calc_para)
_calc_para['comp_cond'] = dcopy(data.discrim.indiv.ttype)
#########################################
#### ROTATION LDA CALCULATIONS ####
#########################################
# sets the min/max accuracy values
_calc_para['lda_para']['y_acc_min'] = 0
_calc_para['lda_para']['y_acc_max'] = 100
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, _calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.dir)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, data.discrim.dir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.dir, w_prog=w_prog, pW=50.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#########################################
#### FILTERED LDA CALCULATIONS ####
#########################################
# sets the min/max accuracy values
_calc_para['lda_para']['y_acc_min'] = _calc_para['y_acc_min']
_calc_para['lda_para']['y_acc_max'] = _calc_para['y_acc_max']
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, _calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.filt)
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, _calc_para, data.discrim.filt,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not cfcn.run_rot_lda(data, _calc_para, r_filt, i_expt, i_cell, n_trial_max,
d_data=data.discrim.filt, w_prog=w_prog, pW=50., pW0=50.):
self.is_ok = False
self.work_finished.emit(thread_data)
return
else:
# otherwise, update the calculation parameters
data.discrim.filt.yaccmn = _calc_para['y_acc_min']
data.discrim.filt.yaccmx = _calc_para['y_acc_max']
elif self.thread_job_secondary == 'LDA Group Weightings':
# checks to see if the data class as changed parameters
d_data, w_prog = data.discrim.wght, self.work_progress
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=d_data)
# sets up the lda values
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, d_data,
w_prog, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_wght_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#######################################################
#### SPEED DISCRIMINATION ANALYSIS FUNCTIONS ####
#######################################################
elif self.thread_job_secondary == 'Speed LDA Accuracy':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdacc)
# if the pooled data parameters have changed/has not been initialised then calculate the values
if data.discrim.spdacc.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdacc,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
if not self.run_speed_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Speed LDA Comparison (Individual Experiments)':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdc)
# if the pooled data parameters have changed/has not been initialised then calculate the values
if data.discrim.spdc.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdc,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif self.thread_job_secondary == 'Speed LDA Comparison (Pooled Experiments)':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spdcp)
# if the pooled data parameters have changed/has not been initialised then calculate the values
if data.discrim.spdcp.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spdcp,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# elif status == 2:
# if an update in the calculations is required, then run the rotation LDA analysis
if not self.run_pooled_kinematic_lda(data, calc_para, r_filt, i_expt, i_cell, n_trial_max,
w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
# # calculates the psychometric curves
# w_prog.emit('Calculating Psychometric Curves', 100.)
# cfcn.calc_all_psychometric_curves(data.discrim.spdcp, float(calc_para['vel_bin']), calc_para['use_all'])
elif self.thread_job_secondary == 'Velocity Direction Discrimination LDA':
# checks to see if any base LDA calculation parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['lda'], other_para=data.discrim.spddir)
# if the pooled data parameters have changed/has not been initialised then calculate the values
if data.discrim.spddir.lda is None:
# sets up the important arrays for the LDA
r_filt, i_expt, i_cell, n_trial_max, status = cfcn.setup_lda(data, calc_para, data.discrim.spddir,
w_prog, True, w_err=w_err)
if status == 0:
# if there was an error in the calculations, then return an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
elif status == 2:
if not self.run_speed_dir_lda_accuracy(data, calc_para, r_filt, i_expt, i_cell,
n_trial_max, w_prog):
self.is_ok = False
self.work_finished.emit(thread_data)
return
#######################################
#### MISCELLANEOUS FUNCTIONS ####
#######################################
elif self.thread_job_secondary == 'Velocity Multilinear Regression Dataframe Output':
# checks to see if any base spiking frequency dataframe parameters have been altered
self.check_altered_para(data, calc_para, plot_para, g_para, ['spikedf'], other_para=data.spikedf)
# checks to see if the overlap duration is less than the time bin size
if calc_para['t_over'] >= calc_para['bin_sz']:
# if not, then output an error to screen
e_str = 'Bin Overlap Duration must be less than the Time Bin Size.\n' \
'Reset these parameters before running this function.'
w_err.emit(e_str, 'Incorrect Function Parameters')
# exits the function with an error flag
self.is_ok = False
self.work_finished.emit(thread_data)
return
# only continue if the spiking frequency dataframe has not been set up
if not data.spikedf.is_set:
self.setup_spiking_freq_dataframe(data, calc_para)
elif self.thread_job_secondary == 'Autocorrelogram Theta Index Calculations':
# case to see if any parameters have changed
self.check_altered_para(data, calc_para, plot_para, g_para, ['theta'], other_para=data.theta_index)
# only continue if the theta index dataframe has not been setup
if not data.theta_index.is_set:
self.calc_auto_ccgram_fft(data, calc_para)
###############################
#### OTHER FUNCTIONS ####
###############################
elif self.thread_job_secondary == 'Shuffled Cluster Distances':
# case is the shuffled cluster distances
thread_data = self.calc_shuffled_cluster_dist(calc_para, data.cluster)
elif self.thread_job_primary == 'update_plot':
pass
# emits the finished work signal
self.work_finished.emit(thread_data)
############################################
#### THREAD CALCULATION FUNCTIONS ####
############################################
def load_data_file(self):
'''
Loads the experiment data file(s) selected in the load dialog.

:return: list of the newly loaded data dictionaries (None if cancelled or no new files)
'''
# retrieves the job parameters
load_dlg, loaded_exp, is_multi = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]
if not np.any([not x in loaded_exp for x in load_dlg.exp_name]):
# if there are no new experiments to load, then exit the function
return None
else:
n_file = len(load_dlg.exp_files)
dpw, p_rlx, data = 1.0 / n_file, 0.05, []
_, f_extn = os.path.splitext(load_dlg.exp_files[0])
# loops through each of the experiment data files
for i_file in range(n_file):
if not self.is_running:
# if the user cancelled, then exit
return None
else:
# updates the progress bar string
p_str, pw0 = 'Loading File {0} of {1}'.format(i_file+1, n_file), i_file / n_file
self.work_progress.emit(p_str, 100.0 * pw0)
# sets the experiment file and name
if load_dlg.exp_name[i_file] not in loaded_exp:
# loads the data from the data file
with open(load_dlg.exp_files[i_file], 'rb') as fp:
data_nw = p.load(fp)
# setting of other fields
if isinstance(data_nw, dict):
data_nw['expFile'] = load_dlg.exp_files[i_file]
# re-calculates the signal features (single experiment only)
if f_extn == '.cdata':
if np.shape(data_nw['sigFeat'])[1] == 5:
# memory allocation for the signal features
xi = np.array(range(data_nw['nPts']))
sFeat = np.zeros((data_nw['nC'], 2))
for i in range(data_nw['nC']):
# creates the piecewise-polynomial of the mean signal
pp, t_max = pchip(xi, data_nw['vMu'][:, i]), data_nw['sigFeat'][i, 2]
t_min = np.argmin(data_nw['vMu'][int(t_max):, i]) + t_max
v_max_2 = data_nw['vMu'][int(t_max), i] / 2.0
v_min = np.min(data_nw['vMu'][int(t_max):, i])
v_half = data_nw['vMu'][int(data_nw['sigFeat'][i, 1]), i] / 2.0
##################################################
#### POST-STIMULI SPIKE HALF-WIDTH TIME ####
##################################################
# sets the search bounds around the signal feature points
bnd_1 = [(data_nw['sigFeat'][i, 0], data_nw['sigFeat'][i, 1])]
bnd_2 = [(data_nw['sigFeat'][i, 1], data_nw['sigFeat'][i, 2])]
bnd_3 = [(data_nw['sigFeat'][i, 2], t_min)]
# determines the location of the half-width points
t_hw1_lo = cfcn.opt_time_to_y0((pp, v_half), bnd_1)
t_hw1_hi = cfcn.opt_time_to_y0((pp, v_half), bnd_2)
t_hw2_lo = cfcn.opt_time_to_y0((pp, v_max_2), bnd_2)
t_hw2_hi = cfcn.opt_time_to_y0((pp, v_max_2), bnd_3)
t_rlx = cfcn.opt_time_to_y0((pp, v_min + p_rlx * (v_max_2 - v_min)), bnd_3)
# determine if it is feasible to find the 2nd peak half-width point
if (t_hw2_hi is None) or (t_rlx is None):
# if not, then linearly extrapolate past the end point of the signal
xi2 = np.array(range(2*xi[-1]))
ppL = IUS(xi, data_nw['vMu'][:, i], k=1)
# determines the half-width/relaxation time from the extrapolated signal
bnd_4 = [(data_nw['sigFeat'][i, 2], xi2[-1])]
t_hw2_hi = cfcn.opt_time_to_y0((ppL, v_max_2), bnd_4)
t_rlx = cfcn.opt_time_to_y0((ppL, v_min + p_rlx * (v_max_2 - v_min)), bnd_4)
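# the relaxation time is taken as the point where the (extrapolated) signal
# recovers to within p_rlx (5%) of the (half-max - min) range above the minimum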
# calculates the new signal features
data_nw['sigFeat'][i, 3] = t_hw1_lo
data_nw['sigFeat'][i, 4] = t_hw1_hi
sFeat[i, 0] = t_hw2_hi - t_hw2_lo
sFeat[i, 1] = t_rlx - t_max
# concatenates the new signal feature data
data_nw['sigFeat'] = np.concatenate((data_nw['sigFeat'], sFeat), axis=1)
# sets the cell cluster include indices (if not already set)
if 'clInclude' not in data_nw['expInfo']:
data_nw['expInfo']['clInclude'] = np.ones(data_nw['nC'], dtype=bool)
# appends the new data dictionary to the overall data list
data.append(data_nw)
# appends the current filename to the data dictionary and returns the object
return data
def save_multi_expt_file(self, data, out_info):
'''
Saves the multi-experiment data object to file.

:param data: overall data class object
:param out_info: dictionary containing the output directory and file name
'''
# updates the progressbar
self.work_progress.emit('Saving Data To File...', 50.0)
# sets the file extension (based on the data type)
if hasattr(data.comp, 'data'):
f_extn = 'mdata' if len(data.comp.data) == 0 else 'mcomp'
else:
f_extn = 'mdata'
# sets the output file name
out_file = os.path.join(out_info['inputDir'], '{0}.{1}'.format(out_info['dataName'], f_extn))
# outputs the data to file
with open(out_file, 'wb') as fw:
p.dump(data, fw)
# updates the progressbar
self.work_progress.emit('Data Save Complete!', 100.0)
def save_multi_comp_file(self, data, out_info):
'''
Saves the multi-comparison data object to file.

:param data: overall data class object
:param out_info: dictionary containing the output directory, file name and experiment names
'''
# updates the progressbar
self.work_progress.emit('Saving Data To File...', 50.0)
# memory allocation
n_file = len(out_info['exptName'])
# sets the output file name
out_file = os.path.join(out_info['inputDir'], '{0}.mcomp'.format(out_info['dataName']))
# output data file
data_out = {
'data': np.empty((n_file, 2), dtype=object),
'c_data': np.empty(n_file, dtype=object),
'ff_corr': data.comp.ff_corr if hasattr(data.comp, 'ff_corr') else None,
'f_data': data.externd.free_data if hasattr(data.externd, 'free_data') else None
}
for i_file in range(n_file):
# retrieves the index of the data field corresponding to the current experiment
fix_file = out_info['exptName'][i_file].split('/')[0]
i_comp = cf.det_comp_dataset_index(data.comp.data, fix_file)
# creates the multi-experiment data file based on the type
data_out['c_data'][i_file] = data.comp.data[i_comp]
data_out['data'][i_file, 0], data_out['data'][i_file, 1] = \
cf.get_comp_datasets(data, c_data=data_out['c_data'][i_file], is_full=True)
# outputs the data to file
with open(out_file, 'wb') as fw:
p.dump(data_out, fw)
# updates the progressbar
self.work_progress.emit('Data Save Complete!', 100.0)
def init_pool_worker(self):
'''
Initialises the multiprocessing pool worker object.

:return: the multiprocessing Pool object
'''
# creates the pool worker object
p = mp.Pool(int(np.floor(1.5 * mp.cpu_count())))
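# note: the pool is oversubscribed at ~1.5x the CPU count, presumably to keep
# workers busy while individual tasks block on I/O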
# returns the object
return p
def init_cluster_data(self):
'''
Initialises the single-experiment cluster data file from the raw probe data.
'''
def map_cluster_depths():
'''
Retrieves the cluster depths and maps them to actual values via the channel
mapping file (if one is provided).

:return: the (mapped) cluster depths and the channel mapping array
'''
# retrieves the cluster depths from the spike I/O class object
cluster_depth = sp_io.get_cluster_depths(cluster_ids)
# sets the mapped cluster depths based on the file type
if (exp_info['dmapFile'] is None) or (len(exp_info['dmapFile']) == 0):
# no map is given so return the original depth values
return cluster_depth, None
else:
# otherwise, map the cluster depth values from the probe to actual values
data = np.array(pd.read_csv(exp_info['dmapFile']))
if np.size(data, axis=1) < 4:
# if the mapping file is not correct, then output an error to screen
e_str = 'Channel mapping file does not have the correct format.\n\n' \
'Re-select a valid file before attempting to initialise the combined data files.'
self.work_error.emit(e_str, 'Invalid Channel Mapping File')
# return none values indicating the error
return None, None
else:
# otherwise, return the mapped channel depths and the other mapping values
return np.array([data[data[:, 1] == x, 0][0] for x in cluster_depth]), data[:, :4]
# retrieves the job parameters
exp_info, out_name, g_para = self.thread_job_para[0], self.thread_job_para[1], self.thread_job_para[2]
# sets the global parameters
n_hist = int(g_para['n_hist'])
n_spike = int(g_para['n_spike'])
cluster_ids = None
# retrieves the spike I/O data and sets the cluster IDs based on the cluster type
sp_io = spike_io.SpikeIo(exp_info['srcDir'], exp_info['traceFile'], int(exp_info['nChan']))
if exp_info['clusterType'] == 'Good':
# case is the good clusters
if hasattr(sp_io, 'good_cluster_ids'):
cluster_ids = sp_io.good_cluster_ids
elif exp_info['clusterType'] == 'MUA':
# case is the multi-unit clusters
if hasattr(sp_io, 'MUA_cluster_ids'):
cluster_ids = sp_io.MUA_cluster_ids
if cluster_ids is None:
e_str = 'Cluster group file is missing. Please re-run with the cluster-group file in the source data directory.'
self.work_error.emit(e_str, 'Cluster Group File Missing!')
return
# retrieves the clusters spike data and channel depths
self.work_progress.emit('Reshaping Cluster Data...', 0.0)
clusters = [ClusterRead(sp_io, cid) for cid in cluster_ids]
# determines the channel depths mapping
depth, channel_map_data = map_cluster_depths()
if depth is None:
# if the file has an incorrect format, then exit the function
return
# determines if the mapping values were set correctly
if channel_map_data is not None:
# if so, then determine the region/recording layers
y_coords = channel_map_data[:, 3]
depthLo, depthHi = np.array(exp_info['depthLo']).astype(int), np.array(exp_info['depthHi']).astype(int)
indD = np.array([next((i for i in range(len(depthHi)) if x <= depthHi[i]), len(depthHi)-1) for x in y_coords])
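# each channel y-coordinate is assigned to the first depth band whose upper
# bound (depthHi) contains it, which then indexes the region/layer names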
chRegion = np.array(exp_info['regionName'])[indD][depth.astype(int)]
chLayer = np.array(exp_info['recordLayer'])[indD][depth.astype(int)]
else:
# otherwise, return N/A for the region/recording layers
chRegion, chLayer = ['N/A'] * len(clusters), ['N/A'] * len(clusters)
depthLo, depthHi = None, None
# sets the signal point-wise/ISI bin vectors
xi_pts_H = np.linspace(-200, 100, n_hist + 1)
xi_isi_H = np.linspace(0, 1000, n_hist + 1)
# creates the recording/experimental information sub-dictionaries
expInfo = {'name': exp_info['expName'], 'date': exp_info['expDate'], 'cond': exp_info['expCond'],
'type': exp_info['expType'], 'sex': exp_info['expSex'], 'age': exp_info['expAge'],
'probe': exp_info['expProbe'], 'lesion': exp_info['lesionType'], 'channel_map': channel_map_data,
'cluster_type': exp_info['clusterType'], 'other_info': exp_info['otherInfo'],
'record_state': exp_info['recordState'], 'record_coord': exp_info['recordCoord'],
'depth_lo': depthLo, 'depth_hi': depthHi}
# memory allocation
pW0, pW1, nFeat = 20.0, 60.0, 5
nC, nSample = len(clusters), np.size(sp_io.traces, axis=0)
sFreq, vGain = float(exp_info['sFreq']), float(exp_info['vGain'])
# sets the data file dictionary object
A = {
'vSpike': np.empty(nC, dtype=object), 'tSpike': np.empty(nC, dtype=object),
'vMu': None, 'vSD': None, 'ccGram': None, 'ccGramXi': None, 'sigFeat': np.zeros((nC, nFeat)),
'clustID': cluster_ids, 'expInfo': expInfo, 'chDepth': depth, 'chRegion': chRegion, 'chLayer': chLayer,
'sFreq': sFreq, 'nC': nC, 'nPts': None, 'tExp': nSample / sFreq, 'vGain': vGain,
'isiHist': np.empty(nC, dtype=object), 'isiHistX': xi_isi_H,
'ptsHist': np.empty(nC, dtype=object), 'ptsHistX': xi_pts_H,
'rotInfo': None,
}
# sets up the rotation analysis data dictionary
A['rotInfo'] = rot.load_rot_analysis_data(A, exp_info, sp_io, w_prog=self.work_progress, pW0=pW0)
# sets up the sub-job flags
self.sub_job = np.zeros(nC, dtype=bool)
# retrieves the cluster data
for i, c in enumerate(clusters):
if not self.is_running:
# if the user cancelled, then exit the function
return
else:
# updates the main gui progressbar
pW = pW0 + pW1 * (i + 1) / nC
self.work_progress.emit('Processing Cluster {0} of {1}'.format(i + 1, nC), pW)
###################################################
#### DATA RETRIEVAL & MEMORY ALLOCATIONS ####
###################################################
# retrieves the spike voltage/timing
v_spike = c.channel_waveforms
t_spike = 1000.0 * sp_io.get_spike_times_in_cluster(cluster_ids[i]) / sFreq
# memory allocation (only for the first cluster)
if i == 0:
A['nPts'] = np.size(v_spike, axis=0)
A['vMu'] = np.zeros((A['nPts'], nC), dtype=float)
A['vSD'] = np.zeros((A['nPts'], nC), dtype=float)
xi = np.array(range(A['nPts']))
###############################################
#### MAIN METRIC CALCULATION/STORAGE ####
###############################################
# sets the values into the final array
A['vSpike'][i] = v_spike[:, :n_spike] * vGain
A['tSpike'][i] = t_spike[:np.size(v_spike, axis=1)]
# calculates the mean/standard deviation of the voltage spikes
A['vMu'][:, i] = np.mean(v_spike, axis=1) * vGain
A['vSD'][:, i] = np.std(v_spike, axis=1) * vGain
######################################
#### HISTOGRAM CALCULATIONS ####
######################################
# calculates the point-wise histograms
A['ptsHist'][i] = np.zeros((A['nPts'], n_hist), dtype=int)
for iPts in range(A['nPts']):
H = np.histogram(v_spike[iPts, :], bins=xi_pts_H)
A['ptsHist'][i][iPts, :] = H[0]
# calculates the ISI histograms
dT = np.diff(A['tSpike'][i])
dT = dT[dT <= xi_isi_H[-1]]
H_isi = np.histogram(dT, bins=xi_isi_H, range=(xi_isi_H[0], xi_isi_H[-1]))
A['isiHist'][i] = H_isi[0]
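# note: tSpike is in milliseconds, so the ISI histogram spans 0-1000 ms and
# inter-spike intervals longer than 1 s are discarded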
###########################################
#### SIGNAL FEATURE CALCULATIONS ####
###########################################
# creates the piecewise-polynomial of the mean signal
pp = pchip(xi, A['vMu'][:, i])
# determines the points/voltages of the maxima preceding and following the minimum
i_min = np.argmin(A['vMu'][:, i])
i_max1 = np.argmax(A['vMu'][:i_min, i])
i_max2 = np.argmax(A['vMu'][i_min:, i]) + i_min
# determines the location of the half-width points
v_half = (min(pp(i_max1), pp(i_max2)) + pp(i_min)) / 2.0
t_lo = cfcn.opt_time_to_y0((pp, v_half), [(i_max1, i_min)])
t_hi = cfcn.opt_time_to_y0((pp, v_half), [(i_min, i_max2)])
# sets the signal features into the final array
A['sigFeat'][i, :] = [i_max1, i_min, i_max2, t_lo, t_hi]
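# sigFeat columns: [first-peak index, trough index, second-peak index,
# half-width lower crossing time, half-width upper crossing time]; two further
# columns (2nd-peak half-width, relaxation time) are appended on file load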
# memory garbage collection
gc.collect()
######################################################
#### CLUSTER CROSS-CORRELOGRAM CALCULATIONS ####
######################################################
# memory allocation
win_size = 50
# calculates the cross-correlation between each signal from each cluster
for i_row in range(nC):
if not self.is_running:
# if the user cancelled, then exit the function
return
else:
# updates the main gui progressbar
pW = (pW0 + pW1) + (100.0 - (pW0 + pW1)) * (i_row + 1) / (nC + 1)
self.work_progress.emit('Calculating CC-Grams...', pW)
# calculates the cross-correlograms between each of the other clusters
for j_row in range(nC):
if (i_row == 0) and (j_row == 0):
# case is the first cluster so allocate memory and set the time bin array
ccGram, A['ccGramXi'] = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)
A['ccGram'] = np.zeros((nC, nC, len(ccGram)))
A['ccGram'][i_row, j_row, :] = ccGram
else:
# otherwise, set the new values directly into the array
A['ccGram'][i_row, j_row, :], _ = cfcn.calc_ccgram(A['tSpike'][i_row], A['tSpike'][j_row], win_size)
#################################
#### FINAL DATA OUTPUT ####
#################################
# dumps the cluster data to file
self.work_progress.emit('Outputting Data To File...', 99.0)
cf.save_single_file(out_name, A)
##########################################
#### CLUSTER MATCHING FUNCTIONS ####
##########################################
def det_cluster_matches(self, data, calc_para, w_prog):
'''
Determines the fixed/free cluster matches for the selected comparison dataset.

:param data: overall data class object
:param calc_para: calculation parameter dictionary
:param w_prog: progress bar update signal
'''
# retrieves the comparison dataset
i_comp = cf.det_comp_dataset_index(data.comp.data, calc_para['calc_comp'])
c_data, data.comp.last_comp = data.comp.data[i_comp], i_comp
# if there is no further calculation necessary, then exit the function
if c_data.is_set:
return
# updates the cluster matching parameters
c_data.is_set = True
c_data.d_max = calc_para['d_max']
c_data.r_max = calc_para['r_max']
c_data.sig_corr_min = calc_para['sig_corr_min']
c_data.isi_corr_min = calc_para['isi_corr_min']
c_data.sig_diff_max = calc_para['sig_diff_max']
c_data.sig_feat_min = calc_para['sig_feat_min']
c_data.w_sig_feat = calc_para['w_sig_feat']
c_data.w_sig_comp = calc_para['w_sig_comp']
c_data.w_isi = calc_para['w_isi']
# retrieves the fixed/free cluster dataframes
data_fix, data_free = cf.get_comp_datasets(data, c_data=c_data, is_full=True)
def det_overall_cluster_matches(is_feas, D):
'''
Determines the unique fixed-to-free cluster matches by greedily accepting the
closest feasible pairs from the distance matrix.

:param is_feas: boolean feasibility array (fixed x free)
:param D: fixed x free distance matrix
:return: array of free-cluster match indices (-1 indicates no match)
'''
# calculates the pair-wise SS distances between each the fixed/free mean signals
iDsort, n_rows = np.argsort(D.T, axis=None), np.size(D, axis=0)
# memory allocation
isFix = np.zeros(data_fix['nC'], dtype=bool)
isFree = np.zeros(data_free['nC'], dtype=bool)
i_match = -np.ones(data_fix['nC'], dtype=int)
# determines the overall unique matches
for i in range(len(iDsort)):
# determines the indices of the next best match
iR, iC = cfcn.ind2sub(n_rows, iDsort[i])
if not (isFix[iR] or isFree[iC]) and is_feas[iR, iC]:
# if there is not already a match, then update the match arrays
i_match[iR] = iC
isFix[iR], isFree[iC] = True, True
if all(isFix) or all(isFree):
# if all matches are found, then exit the loop
break
# returns the final match array
return i_match
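# design note: this is a greedy nearest-pair assignment over the sorted distance
# matrix, not a globally optimal matching; scipy.optimize.linear_sum_assignment
# (the Hungarian algorithm) would minimise the total pairwise distance instead,
# at a higher computational cost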
def det_cluster_matches_old(c_data, is_feas, d_depth):
'''
Determines the cluster matches using the original (euclidean distance/z-score)
metrics.

:param c_data: comparison data class object
:param is_feas: boolean feasibility array
:param d_depth: fixed x free channel depth difference array
'''
# parameters
z_max = 1.0
# calculates the inter-signal euclidean distances
DD = cdist(data_fix['vMu'].T, data_free['vMu'].T)
# determines the matches based on the signal euclidean distances
c_data.i_match_old = det_overall_cluster_matches(is_feas, DD)
# calculates the correlation coefficients between the best matching signals
for i in range(data_fix['nC']):
# calculation of the z-scores
i_match = c_data.i_match_old[i]
if i_match >= 0:
# z-score calculations
dW = data_fix['vMu'][:, i] - data_free['vMu'][:, i_match]
c_data.z_score[:, i] = np.divide(dW, data_fix['vSD'][:, i])
# calculates the correlation coefficient
CC = np.corrcoef(data_fix['vMu'][:, i], data_free['vMu'][:, i_match])
c_data.sig_corr_old[i] = CC[0, 1]
c_data.sig_diff_old[i] = DD[i, i_match]
c_data.d_depth_old[i] = d_depth[i, i_match]
# sets the acceptance flag. for a cluster to be accepted, the following must be true:
# * the maximum absolute z-score must be < z_max
# * the correlation coefficient between the fixed/free signals must be > sig_corr_min
c_data.is_accept_old[i] = np.max(np.abs(c_data.z_score[:, i])) < z_max and \
c_data.sig_corr_old[i] > c_data.sig_corr_min
else:
# sets NaN values for all the single value metrics
c_data.sig_corr_old[i] = np.nan
c_data.d_depth_old[i] = np.nan
# ensures the group is rejected
c_data.is_accept_old[i] = False
def det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog):
'''
Determines the cluster matches using the new (DTW/histogram-based) metrics.

:param c_data: comparison data class object
:param is_feas: boolean feasibility array
:param d_depth: fixed x free channel depth difference array
:param r_spike: relative spiking rate array
:param w_prog: progress bar update signal
'''
# parameters
pW = 100.0 / 7.0
# memory allocation
signal_metrics = np.zeros((data_fix['nC'], data_free['nC'], 4))
isi_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))
isi_metrics_norm = np.zeros((data_fix['nC'], data_free['nC'], 3))
total_metrics = np.zeros((data_fix['nC'], data_free['nC'], 3))
# initialises the comparison data object
w_prog.emit('Calculating Signal DTW Indices', pW)
c_data = cfcn.calc_dtw_indices(c_data, data_fix, data_free, is_feas)
# calculates the signal feature metrics
w_prog.emit('Calculating Signal Feature Metrics', 2.0 * pW)
signal_feat = cfcn.calc_signal_feature_diff(data_fix, data_free, is_feas)
# calculates the signal direct matching metrics
w_prog.emit('Calculating Signal Comparison Metrics', 3.0 * pW)
cc_dtw, dd_dtw, dtw_scale = \
cfcn.calc_signal_corr(c_data.i_dtw, data_fix, data_free, is_feas)
signal_metrics[:, :, 0] = cc_dtw
signal_metrics[:, :, 1] = 1.0 - dd_dtw
signal_metrics[:, :, 2] = dtw_scale
signal_metrics[:, :, 3] = \
cfcn.calc_signal_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)
# calculates the ISI histogram metrics
w_prog.emit('Calculating ISI Histogram Comparison Metrics', 4.0 * pW)
isi_metrics[:, :, 0], isi_metrics_norm[:, :, 0] = \
cfcn.calc_isi_corr(data_fix, data_free, is_feas)
isi_metrics[:, :, 1], isi_metrics_norm[:, :, 1] = \
cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_hist_intersect, max_norm=True)
# isi_metrics[:, :, 2], isi_metrics_norm[:, :, 2] = \
# cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_wasserstein, max_norm=False)
# isi_metrics[:, :, 3], isi_metrics_norm[:, :, 3] = \
# cfcn.calc_isi_hist_metrics(data_fix, data_free, is_feas, cfcn.calc_bhattacharyya, max_norm=True)
# sets the isi relative spiking rate metrics
isi_metrics[:, :, 2] = np.nan
for i_row in range(np.size(r_spike, axis=0)):
isi_metrics[i_row, is_feas[i_row, :], 2] = r_spike[i_row, is_feas[i_row, :]]
isi_metrics_norm[:, :, 2] = cfcn.norm_array_rows(isi_metrics[:, :, 2], max_norm=False)
# calculates the array euclidean distances (over all measures/clusters)
weight_array = [c_data.w_sig_feat, c_data.w_sig_comp, c_data.w_isi]
total_metrics[:, :, 0] = cfcn.calc_array_euclidean(signal_feat)
total_metrics[:, :, 1] = cfcn.calc_array_euclidean(signal_metrics)
total_metrics[:, :, 2] = cfcn.calc_array_euclidean(isi_metrics_norm)
total_metrics_mean = cfcn.calc_weighted_mean(total_metrics, W=weight_array)
# determines the unique overall cluster matches
w_prog.emit('Determining Overall Cluster Matches', 5.0 * pW)
c_data.i_match = det_overall_cluster_matches(is_feas, -total_metrics_mean)
# matches which are from different regions are to be removed
ii = np.where(c_data.i_match >= 0)[0]
same_region = data_fix['chRegion'][ii] == data_free['chRegion'][c_data.i_match[ii]]
c_data.i_match[ii[~same_region]] = -1
# calculates the correlation coefficients between the best matching signals
w_prog.emit('Setting Final Match Metrics', 6.0 * pW)
for i in range(data_fix['nC']):
# calculation of the z-scores
i_match = c_data.i_match[i]
if i_match >= 0:
# sets the signal feature metrics
c_data.match_intersect[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i, i_match,
True, cfcn.calc_hist_intersect)
c_data.match_wasserstain[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,
i_match, True, cfcn.calc_wasserstein)
c_data.match_bhattacharyya[:, i] = cfcn.calc_single_hist_metric(data_fix, data_free, i,
i_match, True, cfcn.calc_bhattacharyya)
# sets the signal difference metrics
c_data.d_depth[i] = d_depth[i, i_match]
c_data.dtw_scale[i] = dtw_scale[i, i_match]
c_data.sig_corr[i] = cc_dtw[i, i_match]
c_data.sig_diff[i] = max(0.0, 1 - dd_dtw[i, i_match])
                c_data.sig_intersect[i] = signal_metrics[i, i_match, 3]    # histogram intersection metric
# sets the isi metrics
c_data.isi_corr[i] = isi_metrics[i, i_match, 0]
c_data.isi_intersect[i] = isi_metrics[i, i_match, 1]
# sets the total match metrics
c_data.signal_feat[i, :] = signal_feat[i, i_match, :]
c_data.total_metrics[i, :] = total_metrics[i, i_match, :]
c_data.total_metrics_mean[i] = total_metrics_mean[i, i_match]
# sets the acceptance flag. for a cluster to be accepted, the following must be true:
# * the ISI correlation coefficient must be > isi_corr_min
# * the signal correlation coefficient must be > sig_corr_min
# * the inter-signal euclidean distance must be < sig_diff_max
# * all signal feature metric similarity scores must be > sig_feat_min
c_data.is_accept[i] = (c_data.isi_corr[i] > c_data.isi_corr_min) and \
(c_data.sig_corr[i] > c_data.sig_corr_min) and \
(c_data.sig_diff[i] > (1 - c_data.sig_diff_max)) and \
(np.all(c_data.signal_feat[i, :] > c_data.sig_feat_min))
else:
# sets NaN values for all the single value metrics
c_data.d_depth[i] = np.nan
c_data.dtw_scale[i] = np.nan
c_data.sig_corr[i] = np.nan
c_data.sig_diff[i] = np.nan
c_data.sig_intersect[i] = np.nan
c_data.isi_corr[i] = np.nan
c_data.isi_intersect[i] = np.nan
c_data.signal_feat[i, :] = np.nan
c_data.total_metrics[i, :] = np.nan
c_data.total_metrics_mean[i] = np.nan
# ensures the group is rejected
c_data.is_accept[i] = False
    # calculates the spiking rates (spike counts normalised by the experiment duration)
n_spike_fix = [len(x) / data_fix['tExp'] for x in data_fix['tSpike']]
n_spike_free = [len(x) / data_free['tExp'] for x in data_free['tSpike']]
    # calculates the relative spiking rates (note - ratios are converted so that they are all > 1)
r_spike = np.divide(repmat(n_spike_fix, data_free['nC'], 1).T,
repmat(n_spike_free, data_fix['nC'], 1))
r_spike[r_spike < 1] = 1 / r_spike[r_spike < 1]
# calculates the pair-wise distances between the fixed/free probe depths
d_depth = np.abs(np.subtract(repmat(data_fix['chDepth'], data_free['nC'], 1).T,
repmat(data_free['chDepth'], data_fix['nC'], 1)))
# determines the feasible fixed/free cluster groupings such that:
# 1) the channel depth has to be <= d_max
# 2) the relative spiking rates between clusters is <= r_max
is_feas = np.logical_and(r_spike <= c_data.r_max, d_depth <= c_data.d_max)
# determines the cluster matches from the old/new methods
det_cluster_matches_old(c_data, is_feas, d_depth)
det_cluster_matches_new(c_data, is_feas, d_depth, r_spike, w_prog)
def calc_ccgram_types(self, calc_para, data):
'''
        :param calc_para: calculation parameter dictionary
        :param data: list of experiment data objects (one per experiment)
        :return:
'''
# determines the indices of the experiment to be analysed
if calc_para['calc_all_expt']:
# case is all experiments are to be analysed
i_expt = list(range(len(data)))
else:
# case is a single experiment is being analysed
i_expt = [cf.get_expt_index(calc_para['calc_exp_name'], data)]
# memory allocation
d_copy = copy.deepcopy
A, B, C = np.empty(len(i_expt), dtype=object), [[] for _ in range(5)], [[] for _ in range(4)]
c_type, t_dur, t_event, ci_lo, ci_hi, ccG_T = d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A), d_copy(A)
        # loops through each of the experiments to be analysed
for i_ex in i_expt:
# sets the experiment ID info based on the number of experiments being analysed
if len(i_expt) == 1:
# only one experiment is being analysed
expt_id = None
else:
# multiple experiments are being analysed
expt_id = [(i_ex+1), len(i_expt)]
# retrieves the cluster information
t_dur[i_ex], t_event[i_ex] = d_copy(C), d_copy(C)
c_type[i_ex], ci_lo[i_ex], ci_hi[i_ex], ccG_T[i_ex] = d_copy(B), d_copy(B), d_copy(B), d_copy(B)
ccG, ccG_xi, t_spike = data[i_ex]['ccGram'], data[i_ex]['ccGramXi'], data[i_ex]['tSpike']
c_id = data[i_ex]['clustID']
# runs the cc-gram type calculation function
c_type0, t_dur[i_ex], t_event[i_ex], ci_hi0, ci_lo0, ccG_T0 = cfcn.calc_ccgram_types(
ccG, ccG_xi, t_spike, calc_para=calc_para, expt_id=expt_id, w_prog=self.work_progress, c_id=c_id)
# sets the final values into their respective groupings
for i in range(5):
# sets the final type values and lower/upper bound confidence interval signals
if len(c_type0[i]):
                    # stacks the type values for this group into a single array
                    c_type[i_ex][i] = np.vstack(c_type0[i])
import numpy as np
from numba import vectorize
# Size is a list giving the network structure, e.g. [3, 5, 5, 4, 2]: 3 is the number of input-layer
# neurons, the middle entries are the neurons per hidden layer, and 2 is the number of output-layer neurons
class nn_Creat():
def __init__(self,Size,active_fun='sigmoid',learning_rate=1.5,batch_normalization=1,objective_fun='MSE',
output_function='sigmoid',optimization_method='normal',weight_decay=0):
        self.Size=Size # initialises the network parameters and prints them
print('the structure of the NN is \n', self.Size)
self.active_fun=active_fun
print('active function is %s '% active_fun)
self.learning_rate=learning_rate
print('learning_rate is %s '% learning_rate)
self.batch_normalization=batch_normalization
print('batch_normalization is %d '% batch_normalization)
self.objective_fun=objective_fun
print('objective_function is %s '% objective_fun)
self.optimization_method=optimization_method
print('optimization_method is %s '% optimization_method)
self.weight_decay = weight_decay
print('weight_decay is %f '% weight_decay)
        # initialise the network weights and gradients
self.vecNum=0
self.depth=len(Size)
self.W=[]
self.b=[]
self.W_grad=[]
self.b_grad=[]
self.cost=[]
        if self.batch_normalization: # if batch normalization is used, introduce the mean E and variance S plus the scale/shift factors Gamma and Beta
self.E = []
self.S = []
self.Gamma = []
self.Beta = []
        if objective_fun=='Cross Entropy': # checks whether the objective function is the cross-entropy function
self.output_function='softmax'
else:
self.output_function='sigmoid'
print('output_function is %s \n'% self.output_function)
print('Start training NN \n')
for item in range(self.depth-1):
width=self.Size[item]
height=self.Size[item+1]
            q=2*np.random.rand(height,width)/np.sqrt(width)-1/np.sqrt(width) # initialises the weight coefficients W
self.W.append(q)
            if self.active_fun=='relu': # checks whether the activation function is relu, which determines how b is initialised
self.b.append(np.random.rand(height,1)+0.01)
else:
self.b.append(2*np.random.rand(height,1)/np.sqrt(width)-1/np.sqrt(width))
            if self.optimization_method=='Momentum': # checks whether momentum is used, i.e. an accumulation of the previous gradients
if item!=0:
self.vW.append(np.zeros([height,width]))
self.vb.append(np.zeros([height, 1]))
else:
self.vW=[]
self.vb=[]
self.vW.append(np.zeros([height, width]))
self.vb.append(np.zeros([height, 1]))
            if self.optimization_method=='AdaGrad' or optimization_method=='RMSProp' or optimization_method=='Adam': # checks whether one of the adaptive-gradient methods is used
if item!=0:
self.rW.append(np.zeros([height,width]))
self.rb.append(np.zeros([height, 1]))
else:
self.rW=[]
self.rb=[]
self.rW.append(np.zeros([height, width]))
self.rb.append(np.zeros([height, 1]))
            if self.optimization_method == 'Adam': # checks whether the optimisation method is Adam
if item!=0:
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
else:
self.sW = []
self.sb = []
self.sW.append(np.zeros([height, width]))
self.sb.append(np.zeros([height, 1]))
            if self.batch_normalization: # normalises each layer if batch normalization is used
                self.Gamma.append(np.array([1]))
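                # note (illustrative, not part of the original code): the layer
                # initialisation above is a uniform Xavier-style scheme, i.e.
                #   W ~ U(-1/sqrt(fan_in), +1/sqrt(fan_in))
                # equivalently: W = (2 * np.random.rand(fan_out, fan_in) - 1) / np.sqrt(fan_in)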
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD
# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040
sns.set(style='darkgrid')
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)
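# a minimal sketch (illustrative only; the figure below uses hand-built envelopes) of
# the single sifting step the first figure depicts: subtract the mean of cubic spline
# envelopes fitted through the local maxima and minima
def sift_once_sketch(t, x):
    u = Utility(time=t, time_series=x)
    max_bool, min_bool = u.max_bool_func_1st_order_fd(), u.min_bool_func_1st_order_fd()
    upper = CubicSpline(t[max_bool], x[max_bool])(t)
    lower = CubicSpline(t[min_bool], x[min_bool])(t)
    return x - (upper + lower) / 2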
# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi],
labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()
knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)
# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
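# an illustrative Cox-de Boor recursion (independent of emd_basis, shown here only to
# make the construction of the plotted bases explicit); i indexes the basis function,
# k is the degree, t is a scalar evaluation point, and knots is a non-decreasing knot vector
def bspline_basis_sketch(i, k, t, knots):
    if k == 0:
        return float(knots[i] <= t < knots[i + 1])
    left = 0.0 if knots[i + k] == knots[i] else \
        (t - knots[i]) / (knots[i + k] - knots[i]) * bspline_basis_sketch(i, k - 1, t, knots)
    right = 0.0 if knots[i + k + 1] == knots[i + 1] else \
        (knots[i + k + 1] - t) / (knots[i + k + 1] - knots[i + 1]) * bspline_basis_sketch(i + 1, k - 1, t, knots)
    return left + right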
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots_uniform)):
axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()
# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots)):
axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots[i])):
axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
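# an illustrative sketch of winsorizing, the idea behind preprocess.winsorize above;
# the window handling and the two-sided quantile level a are assumptions here, not
# the exact AdvEMDpy implementation (x is assumed to be a 1-D numpy array)
def winsorize_sketch(x, window_width=81, a=0.8):
    half = window_width // 2
    out = x.copy()
    for i in range(len(x)):
        w = x[max(0, i - half):i + half + 1]
        lo, hi = np.quantile(w, [(1 - a) / 2, 1 - (1 - a) / 2])
        out[i] = min(max(x[i], lo), hi)  # clips the sample to the local quantile band
    return out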
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()
# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
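# an illustrative sketch of the reflections the figure above depicts, using one common
# convention (the script's own reflection is precomputed slightly differently): a
# symmetric extension mirrors the tail in time, while an anti-symmetric extension also
# mirrors the amplitudes about the endpoint value
def reflect_sketch(x, anti=False):
    mirrored = np.flip(x[:-1])  # time-reversed tail beyond the boundary
    return 2 * x[-1] - mirrored if anti else mirrored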
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1)
max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1)
min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1)
min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101)
dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101)
s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1
max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1)
max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101)
dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101)
dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101)
s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2
min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1)
min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101)
dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time)
dash_4 = np.linspace(slope_based_maximum, slope_based_minimum)
maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash)
maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash)
maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash)
maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101)
maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time)
minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash)
minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash)
minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash)
minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101)
minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \
time_series[time == minima_x[-1]]
improved_slope_based_maximum_time = time[-1]
improved_slope_based_maximum = time_series[-1]
improved_slope_based_minimum_time = slope_based_minimum_time
improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time -
improved_slope_based_maximum_time)
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
-0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
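# an illustrative sketch of the slope-based maximum drawn above: the last minimum is
# projected forward along the slope s1 of the preceding minimum-to-maximum segment
def slope_based_max_sketch(maxima_x, maxima_y, minima_x, minima_y):
    s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
    t_new = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
    return t_new, minima_y[-1] + (t_new - minima_x[-1]) * s1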
# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2
A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2
P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2])
P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1])
Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1]
Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1]
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
Average_max = (maxima_y[-2] + maxima_y[-1]) / 2
Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
Average_min = (minima_y[-2] + minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave)
Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd()
Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave)
Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd()
Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd()
Huang_max_time = Huang_time[Huang_max_bool]
Huang_max = Huang_wave[Huang_max_bool]
Huang_min_time = Huang_time[Huang_min_bool]
Huang_min = Huang_wave[Huang_min_bool]
Coughlin_max_time = Coughlin_time[Coughlin_max_bool]
Coughlin_max = Coughlin_wave[Coughlin_max_bool]
Coughlin_min_time = Coughlin_time[Coughlin_min_bool]
Coughlin_min = Coughlin_wave[Coughlin_min_bool]
max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101)
max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time)
min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101)
min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
min_2_x = minima_y[-2] * np.ones_like(min_2_x_time)
dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101)
dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x)
max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y)
min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
min_2_y_time = minima_x[-2] * np.ones_like(min_2_y)
dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101)
dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time)
max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time)
min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
min_1_x = minima_y[-1] * np.ones_like(min_1_x_time)
dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101)
dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x)
max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y)
min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
min_1_y_time = minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4,
label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4,
label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4,
label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4,
label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()
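# an illustrative sketch of the Coughlin characteristic wave drawn above: a cosine
# carrying the amplitude a1 and period p1 of the final extremum pair
def coughlin_wave_sketch(t, a1, p1):
    return a1 * np.cos(2 * np.pi * (t - t[0]) / p1)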
# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # NaN-filled placeholder (avoids the 0/0 division warning)
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal
neural_network_m = 200
neural_network_k = 100
# forward ->
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
P[-1, col] = 1 # for additive constant
t = lsq_signal[-neural_network_m:]
# fits the forward-extrapolation weights by gradient descent
seed_weights = np.ones(neural_network_k) / neural_network_k
weights = 0 * seed_weights.copy()
train_input = P[:-1, :]
lr = 0.01
for iterations in range(1000):
output = np.matmul(weights, train_input)
error = (t - output)
gradients = error * (- train_input)
    # averages the gradients over the training examples
average_gradients = np.mean(gradients, axis=1)
# steepest descent
max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients)))
adjustment = - lr * average_gradients
# adjustment = - lr * max_gradient_vector
weights += adjustment
# end of the gradient-descent fit
weights_right = np.hstack((weights, 0))
max_count_right = 0
min_count_right = 0
i_right = 0
while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1):
time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \
sum(weights_right * np.hstack((time_series_extended[
int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right):
int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1)))
i_right += 1
if i_right > 1:
emd_utils_max = \
emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
max_count_right += 1
emd_utils_min = \
emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
min_count_right += 1
# backward <-
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)]
P[-1, col] = 1 # for additive constant
t = lsq_signal[:neural_network_m]
vx = cvx.Variable(int(neural_network_k + 1))
objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary
prob = cvx.Problem(objective)
result = prob.solve(verbose=True, solver=cvx.ECOS)
weights_left = np.array(vx.value)
max_count_left = 0
min_count_left = 0
i_left = 0
while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1):
time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \
2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left):
int(len(lsq_signal) - 1 - i_left + neural_network_k)],
1))) + 1
i_left += 1
if i_left > 1:
emd_utils_max = \
emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
max_count_left += 1
emd_utils_min = \
emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
min_count_left += 1
lsq_utils = emd_utils.Utility(time=time, time_series=lsq_signal)
utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended)
maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()]
maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()]
maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()]
minima_time = time[lsq_utils.min_bool_func_1st_order_fd()]
minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Single Neuron Neural Network Example')
plt.plot(time, lsq_signal, zorder=2, label='Signal')
plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12))
plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima')
plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima')
plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3,
label=textwrap.fill('Extrapolated maxima', 12))
plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4,
label=textwrap.fill('Extrapolated minima', 12))
plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k',
label=textwrap.fill('Neural network inputs', 13))
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
-2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k')
plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k')
plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed',
label=textwrap.fill('Neural network targets', 13))
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
-2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray')
plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray',
linestyle='dashed')
plt.xlim(3.4 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/neural_network.png')
plt.show()
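# an illustrative sketch (not used above) of the trained single-neuron forecaster:
# each new sample is a linear combination of the previous k samples plus a bias term
def nn_step_sketch(history, weights_with_bias):
    return float(np.sum(weights_with_bias * np.hstack((history, 1))))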
# plot 6a
np.random.seed(0)
time = np.linspace(0, 5 * np.pi, 1001)
knots_51 = np.linspace(0, 5 * np.pi, 51)
time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time)
noise = np.random.normal(0, 1, len(time_series))
time_series += noise
advemdpy = EMD(time=time, time_series=time_series)
imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3,
edge_effect='symmetric_anchor', verbose=False)[:3]
knots_31 = np.linspace(0, 5 * np.pi, 31)
imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2,
edge_effect='symmetric_anchor', verbose=False)[:3]
knots_11 = np.linspace(0, 5 * np.pi, 11)
imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1,
edge_effect='symmetric_anchor', verbose=False)[:3]
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}')
for knot in knots_51:
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}')
for knot in knots_31:
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
import sys # We need sys so that we can pass argv to QApplication
from pathlib import Path
from dataclasses import dataclass
import numpy as np
from PyQt5 import QtWidgets, QtCore, QtGui, uic
import pyqtgraph as pg
import easyqc.qt
COLOR_PLOTS = (pg.mkColor((31, 119, 180)),)
class EasyQC(QtWidgets.QMainWindow):
"""
This is the view in the MVC approach
"""
layers = None # used for additional scatter layers
@staticmethod
def _instances():
app = QtWidgets.QApplication.instance()
return [w for w in app.topLevelWidgets() if isinstance(w, EasyQC)]
@staticmethod
def _get_or_create(title=None):
eqc = next(filter(lambda e: e.isVisible() and e.windowTitle() == title,
EasyQC._instances()), None)
if eqc is None:
eqc = EasyQC()
eqc.setWindowTitle(title)
return eqc
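    # minimal usage sketch (assumes a Qt application is already running, e.g. one
    # created via easyqc.qt; the window title below is an arbitrary example):
    #   eqc = EasyQC._get_or_create(title='session 1')
    #   eqc.show()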
def __init__(self, *args, **kwargs):
super(EasyQC, self).__init__(*args, **kwargs)
# wave by <NAME> from the Noun Project
self.layers = {}
self.ctrl = Controller(self)
uic.loadUi(Path(__file__).parent.joinpath('easyqc.ui'), self)
self.setWindowIcon(QtGui.QIcon(str(Path(__file__).parent.joinpath('easyqc.svg'))))
background_color = self.palette().color(self.backgroundRole())
# init the seismic density display
self.plotItem_seismic.setAspectLocked(False)
self.imageItem_seismic = pg.ImageItem()
self.plotItem_seismic.setBackground(background_color)
self.plotItem_seismic.addItem(self.imageItem_seismic)
self.viewBox_seismic = self.plotItem_seismic.getPlotItem().getViewBox()
self._init_cmenu()
# init the header display and link X and Y axis with density display
self.plotDataItem_header_h = pg.PlotDataItem()
self.plotItem_header_h.addItem(self.plotDataItem_header_h)
self.plotItem_seismic.setXLink(self.plotItem_header_h)
self.plotDataItem_header_v = pg.PlotDataItem()
self.plotItem_header_h.setBackground(background_color)
self.plotItem_header_v.addItem(self.plotDataItem_header_v)
self.plotItem_header_v.setBackground(background_color)
self.plotItem_seismic.setYLink(self.plotItem_header_v)
# set the ticks so that they don't auto scale and ruin the axes link
ax = self.plotItem_seismic.getAxis('left')
ax.setStyle(tickTextWidth=60, autoReduceTextSpace=False, autoExpandTextSpace=False)
ax = self.plotItem_header_h.getAxis('left')
ax.setStyle(tickTextWidth=60, autoReduceTextSpace=False, autoExpandTextSpace=False)
ax = self.plotItem_header_v.getAxis('left')
ax.setStyle(showValues=False)
# prepare placeholders for hover windows
self.hoverPlotWidgets = {'Trace': None, 'Spectrum': None, 'Spectrogram': None}
# connect signals and slots
s = self.viewBox_seismic.scene()
# vb.scene().sigMouseMoved.connect(self.mouseMoveEvent)
self.proxy = pg.SignalProxy(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)
s.sigMouseClicked.connect(self.mouseClick)
self.lineEdit_gain.returnPressed.connect(self.editGain)
self.lineEdit_sort.returnPressed.connect(self.editSort)
self.comboBox_header.activated[str].connect(self.ctrl.set_header)
self.viewBox_seismic.sigRangeChanged.connect(self.on_sigRangeChanged)
self.horizontalScrollBar.sliderMoved.connect(self.on_horizontalSliderChange)
self.verticalScrollBar.sliderMoved.connect(self.on_verticalSliderChange)
def _init_cmenu(self):
"""
Setup context menus - on instantiation only
"""
self.viewBox_seismic.scene().contextMenu = None # this gets rid of the export context menu
self.plotItem_seismic.plotItem.ctrlMenu = None # this gets rid of the plot context menu
for act in self.viewBox_seismic.menu.actions():
if act.text() == 'View All':
continue
self.viewBox_seismic.menu.removeAction(act)
# and add ours
self.viewBox_seismic.menu.addSeparator()
act = QtWidgets.QAction("View Trace", self.viewBox_seismic.menu)
act.triggered.connect(self.cmenu_ViewTrace)
self.viewBox_seismic.menu.addAction(act)
act = QtWidgets.QAction("View Spectrum", self.viewBox_seismic.menu)
act.triggered.connect(self.cmenu_ViewSpectrum)
self.viewBox_seismic.menu.addAction(act)
act = QtWidgets.QAction("View Spectrogram", self.viewBox_seismic.menu)
act.triggered.connect(self.cmenu_ViewSpectrogram)
self.viewBox_seismic.menu.addAction(act)
"""
View Methods
"""
def closeEvent(self, event):
self.destroy()
def keyPressEvent(self, e):
"""
        page-up / ctrl + a : gain up
        page-down / ctrl + z : gain down
        ctrl + p : propagate display to the other open windows
        ctrl + s : copy a screenshot of the window to the clipboard
        up/down/left/right arrows: pan using keys
        :param e: QKeyEvent
"""
k, m = (e.key(), e.modifiers())
# page up / ctrl + a
if k == QtCore.Qt.Key_PageUp or (
m == QtCore.Qt.ControlModifier and k == QtCore.Qt.Key_A):
self.ctrl.set_gain(self.ctrl.gain - 3)
# page down / ctrl + z
elif k == QtCore.Qt.Key_PageDown or (
m == QtCore.Qt.ControlModifier and k == QtCore.Qt.Key_Z):
self.ctrl.set_gain(self.ctrl.gain + 3)
# control + P: propagate
elif m == QtCore.Qt.ControlModifier and k == QtCore.Qt.Key_P:
self.ctrl.propagate()
# arrows keys move seismic
elif k in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Left, QtCore.Qt.Key_Right, QtCore.Qt.Key_Down):
self.translate_seismic(k, m == QtCore.Qt.ControlModifier)
# ctrl + s: screenshot to clipboard
elif m == QtCore.Qt.ControlModifier and k == QtCore.Qt.Key_S:
qtapp = QtWidgets.QApplication.instance()
qtapp.clipboard().setPixmap(self.grab())
def editGain(self):
self.ctrl.set_gain()
def editSort(self):
keys = self.lineEdit_sort.text().split(' ')
self.ctrl.sort(keys)
def mouseClick(self, event):
if not event.double():
return
qxy = self.imageItem_seismic.mapFromScene(event.scenePos())
tr, s = (qxy.x(), qxy.y())
print(tr, s)
def mouseMoveEvent(self, scenepos):
if isinstance(scenepos, tuple):
scenepos = scenepos[0]
else:
return
qpoint = self.imageItem_seismic.mapFromScene(scenepos)
c, t, a, h = self.ctrl.cursor2timetraceamp(qpoint)
self.label_x.setText(f"{c:.0f}")
self.label_t.setText(f"{t:.4f}")
self.label_amp.setText(f"{a:2.2E}")
htxt = h if isinstance(h, str) else f"{h:.4f}"
self.label_h.setText(htxt)
for key in self.hoverPlotWidgets:
if self.hoverPlotWidgets[key] is not None and self.hoverPlotWidgets[key].isVisible():
self.ctrl.update_hover(qpoint, key)
def translate_seismic(self, k, cm):
"""
Resizes vertical or horizontal on a KeyPress
:param k: translate by 1./7
:param cm (bool): if the control modifier has been pressed, translate by 1./2
:return:
"""
r = self.viewBox_seismic.viewRect()
xlim, ylim = self.ctrl.limits()
FAC = 1 / 2 if cm else 1 / 7
dy = FAC * r.height()
dx = FAC * r.width()
if k == QtCore.Qt.Key_Down:
yr = np.array([r.y(), r.y() + r.height()]) + dy
yr += np.min([0, ylim[1] - yr[1]])
self.viewBox_seismic.setYRange(yr[0], yr[1], padding=0)
elif k == QtCore.Qt.Key_Left:
xr = np.array([r.x(), r.x() + r.width()]) - dx
xr += np.max([0, xlim[0] - xr[0]])
self.viewBox_seismic.setXRange(xr[0], xr[1], padding=0)
elif k == QtCore.Qt.Key_Right:
xr = np.array([r.x(), r.x() + r.width()]) + dx
xr += np.min([0, xlim[1] - xr[1]])
self.viewBox_seismic.setXRange(xr[0], xr[1], padding=0)
elif k == QtCore.Qt.Key_Up:
yr = np.array([r.y(), r.y() + r.height()]) - dy
yr += np.max([0, ylim[0] - yr[0]])
self.viewBox_seismic.setYRange(yr[0], yr[1], padding=0)
def on_sigRangeChanged(self, r):
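        """Sync the horizontal and vertical scroll bars with the current view range."""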
def set_scroll(sb, r, b):
# sb: scroll bar object, r: current range, b: axes limits (bounds)
# cf. https://doc.qt.io/qt-5/qscrollbar.html
range = (r[1] - r[0])
doclength = (b[1] - b[0])
maximum = int((doclength - range) / doclength * 65536)
sb.setMaximum(maximum)
sb.setPageStep(65536 - maximum)
sb.setValue(int((r[0] - b[0]) / doclength * 65536))
xr, yr = self.viewBox_seismic.viewRange()
xl, yl = self.ctrl.limits()
set_scroll(self.horizontalScrollBar, xr, xl)
set_scroll(self.verticalScrollBar, yr, yl)
def on_horizontalSliderChange(self, r):
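        # map the 16-bit slider position back to data coordinates and pan the view horizontally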
b = self.ctrl.limits()[0]
r = self.viewBox_seismic.viewRange()[0]
x = float(self.horizontalScrollBar.value()) / 65536 * (b[1] - b[0]) + b[0]
self.viewBox_seismic.setXRange(x, x + r[1] - r[0], padding=0)
def on_verticalSliderChange(self, r):
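        # same mapping as the horizontal slider, applied to the vertical axis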
b = self.ctrl.limits()[1]
r = self.viewBox_seismic.viewRange()[1]
y = float(self.verticalScrollBar.value()) / 65536 * (b[1] - b[0]) + b[0]
self.viewBox_seismic.setYRange(y, y + r[1] - r[0], padding=0)
def _cmenu_hover(self, key, image=False):
"""Creates the plot widget for a given key: could be 'Trace', 'Spectrum', or 'Spectrogram'"""
if self.hoverPlotWidgets[key] is None:
from easyqc.pgtools import ImShowItem
if image:
self.hoverPlotWidgets[key] = ImShowItem().plotwidget
else:
self.hoverPlotWidgets[key] = pg.plot([0], [0], pen=pg.mkPen(color=COLOR_PLOTS[0]))
self.hoverPlotWidgets[key].setBackground(pg.mkColor('#ffffff'))
self.hoverPlotWidgets[key].setVisible(True)
def cmenu_ViewTrace(self):
self._cmenu_hover('Trace')
def cmenu_ViewSpectrum(self):
self._cmenu_hover('Spectrum')
def cmenu_ViewSpectrogram(self):
self._cmenu_hover('Spectrogram', image=True)
class Controller:
def __init__(self, view):
self.view = view
self.model = Model(None, None)
self.order = None
self.transform = None # affine transform image indices 2 data domain
self.trace_indices = None
self.hkey = None
def remove_all_layers(self):
layers_dict = self.view.layers.copy()
for label in layers_dict:
self.remove_layer_from_label(label)
def remove_layer_from_label(self, label):
current_layer = self.view.layers.get(label)
if current_layer is not None:
current_layer['layer'].clear()
self.view.plotItem_seismic.removeItem(current_layer['layer'])
self.view.layers.pop(label)
def add_scatter(self, x, y, rgb=None, label='default'):
"""
Adds a scatter layer to the display (removing any previous one if any)
"""
rgb = rgb or (0, 255, 0)
self.remove_layer_from_label(label)
new_scatter = pg.ScatterPlotItem()
self.view.layers[label] = {'layer': new_scatter, 'type': 'scatter'}
self.view.plotItem_seismic.addItem(new_scatter)
new_scatter.setData(x=x, y=y, brush=pg.mkBrush(rgb), name=label)
def cursor2timetraceamp(self, qpoint):
"""Used for the mouse hover function over seismic display, returns trace, time,
amplitude,and header"""
ixy = self.cursor2ind(qpoint)
a = self.model.data[ixy[0], ixy[1]]
xy_ = np.matmul(self.transform, np.array([ixy[0], ixy[1], 1]))
t = xy_[self.model.taxis]
c = xy_[self.model.caxis]
h = self.model.header[self.hkey][ixy[self.model.caxis]]
return c, t, a, h
def cursor2ind(self, qpoint):
""" image coordinates over the seismic display"""
ix = np.max((0, np.min((int(np.floor(qpoint.x())), self.model.nx - 1))))
iy = np.max((0, np.min((int(np.round(qpoint.y())), self.model.ny - 1))))
return ix, iy
def limits(self):
# returns the xlims and ylims of the data in the data space (time, trace)
ixlim = [0, self.model.nx]
iylim = [0, self.model.ny]
x, y, _ = np.matmul(self.transform, np.c_[ixlim, iylim, [1, 1]].T)
return x, y
def propagate(self):
""" set all the eqc instances at the same position/gain scales for flip comparisons """
eqcs = self.view._instances()
for eqc in eqcs:
if eqc is self.view:
continue
else:
eqc.setGeometry(self.view.geometry())
eqc.ctrl.set_gain(self.gain)
eqc.plotItem_seismic.setXLink(self.view.plotItem_seismic)
eqc.plotItem_seismic.setYLink(self.view.plotItem_seismic)
# also propagate sorting
eqc.lineEdit_sort.setText(self.view.lineEdit_sort.text())
eqc.ctrl.sort(eqc.lineEdit_sort.text())
def redraw(self):
""" redraw seismic and headers with order and selection"""
# np.take could look neater but it's actually much slower than straight indexing
if self.model.taxis == 1:
self.view.imageItem_seismic.setImage(self.model.data[self.trace_indices, :])
elif self.model.taxis == 0:
self.view.imageItem_seismic.setImage(self.model.data[:, self.trace_indices])
self.set_header()
self.set_gain()
def set_gain(self, gain=None):
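        # gain is expressed in dB: display levels scale as 10 ** (gain / 20)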
if gain is None:
gain = self.gain
levels = 10 ** (gain / 20) * 4 * np.array([-1, 1])
self.view.imageItem_seismic.setLevels(levels)
self.view.lineEdit_gain.setText(f"{gain:.1f}")
@property
def gain(self):
        try:
            return float(self.view.lineEdit_gain.text())
        except ValueError:  # empty or non-numeric field: fall back to the automatic gain
            return self.model.auto_gain()
def set_header(self):
key = self.view.comboBox_header.currentText()
if key not in self.model.header.keys():
return
self.hkey = key
traces = np.arange(self.trace_indices.size)
values = self.model.header[self.hkey][self.trace_indices]
# skip the plotting part for non-numeric arrays
if not np.issubdtype(values.dtype, np.number):
return
if self.model.taxis == 1:
self.view.plotDataItem_header_h.setData(x=traces, y=values)
elif self.model.taxis == 0:
self.view.plotDataItem_header_v.setData(y=traces, x=values)
def sort(self, keys):
if not(set(keys).issubset(set(self.model.header.keys()))):
print("Wrong input")
return
elif len(keys) == 0:
return
self.trace_indices = np.lexsort([self.model.header[k] for k in keys])
self.redraw()
def update_data(self, data, h=None, si=.002, gain=None, x0=0, t0=0, taxis=1):
"""
data is a 2d array [ntr, nsamples]
if 3d the first dimensions are merged in ntr and the last is nsamples
update_data(self, data=None, h=0.002, gain=None)
"""
# reshape a 3d+ array in 2d to plot as an image
self.remove_all_layers()
# if the data has the same shape as the current model data, keep axis all the same
update_axis = self.model.data is None or self.model.data.shape != data.shape
if data.ndim >= 3:
data = np.reshape(data, (-1, data.shape[-1]))
self.model.set_data(data, si=si, header=h, x0=x0, t0=t0, taxis=taxis)
self.trace_indices = np.arange(self.model.ntr) # this will contain selection and sort
clim = [x0 - .5, x0 + self.model.ntr - .5]
tlim = [t0, t0 + self.model.ns * self.model.si]
if taxis == 0: # time is the 0 dimension and the horizontal axis
xlim, ylim = (tlim, clim)
transform = [si, 0., 0., 0., 1, 0., t0 - si / 2, x0 - .5, 1.]
self.view.imageItem_seismic.setImage(data[:, self.trace_indices])
elif taxis == 1: # time is the 1 dimension and vertical axis
xlim, ylim = (clim, tlim)
transform = [1., 0., 0., 0., si, 0., x0 - .5, t0 - si / 2, 1.]
self.view.imageItem_seismic.setImage(data[self.trace_indices, :])
self.view.plotItem_seismic.invertY()
else:
            raise ValueError('taxis must be 0 (horizontal axis) or 1 (vertical axis)')
self.transform = np.array(transform).reshape((3, 3)).T
self.view.imageItem_seismic.setTransform(QtGui.QTransform(*transform))
self.view.plotItem_header_h.setLimits(xMin=xlim[0], xMax=xlim[1])
self.view.plotItem_header_v.setLimits(yMin=ylim[0], yMax=ylim[1])
self.view.plotItem_seismic.setLimits(xMin=xlim[0], xMax=xlim[1], yMin=ylim[0], yMax=ylim[1])
# reset the view
if update_axis:
xlim, ylim = self.limits()
self.view.viewBox_seismic.setXRange(*xlim, padding=0)
self.view.viewBox_seismic.setYRange(*ylim, padding=0)
# set the header combo box keys
if isinstance(self.model.header, dict):
self.view.comboBox_header.clear()
for hname in self.model.header.keys():
self.view.comboBox_header.addItem(hname)
self.set_gain(gain=gain)
self.set_header()
def update_hover(self, qpoint, key):
c, _, _, _ = self.cursor2timetraceamp(qpoint)
if key == 'Trace':
plotitem = self.view.hoverPlotWidgets[key].getPlotItem()
plotitem.items[0].setData(self.model.tscale, self.model.get_trace(c))
plotitem.setXRange(*self.trange)
elif key == 'Spectrum':
plotitem = self.view.hoverPlotWidgets[key].getPlotItem()
plotitem.items[0].setData(*self.model.get_trace_spectrum(c, trange=self.trange))
elif key == 'Spectrogram':
imageshowitem = self.view.hoverPlotWidgets[key].imageshowitem
fscale, tscale, tf = self.model.get_trace_spectrogram(c, trange=self.trange)
imageshowitem.set_image(tf, tscale, fscale)
@property
def trange(self):
"""
returns the current time range of the view
:return: 2 floats list
"""
return self.view.viewBox_seismic.viewRange()[self.model.taxis]
@property
def crange(self):
"""
returns the current channel range of the view
:return: 2 floats list
"""
return self.view.viewBox_seismic.viewRange()[self.model.caxis]
@dataclass
class Model:
"""Class for keeping track of the visualized data"""
    data: np.ndarray
    header: dict
si: float = 1.
def auto_gain(self) -> float:
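        # default display gain: median RMS amplitude of the traces (NaN-aware), in dB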
rmsnan = np.nansum(self.data ** 2, axis=self.taxis) / np.sum(
~np.isnan(self.data), axis=self.taxis)
return 20 * np.log10(np.median(np.sqrt(rmsnan)))
def get_trace_spectrogram(self, c, trange=None):
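        """Compute the spectrogram (dB) of the trace at channel c over the given time range."""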
from scipy.signal import spectrogram
tr = self.get_trace(c, trange=trange)
fscale, tscale, tf = spectrogram(tr, fs=1 / self.si, nperseg=50, nfft=512, window='cosine', noverlap=48)
tscale += trange[0]
tf = 20 * np.log10(tf + np.finfo(float).eps)
return fscale, tscale, tf
def get_trace_spectrum(self, c, trange=None):
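        """Compute the amplitude spectrum (dB) of the trace at channel c over the given time range."""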
tr = self.get_trace(c, trange=trange)
        psd = 20 * np.log10(np.abs(np.fft.rfft(tr)) + np.finfo(float).eps)
        fscale = np.fft.rfftfreq(tr.size, self.si)
        return fscale, psd
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
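    """
    Request a NetCDF export of the given dataset over [start_date, end_date] from the
    OOI M2M interface and poll the asynchronous request until it is ready.
    :param uframe_dataset_name: reference designator path of the dataset stream
    :param start_date: ISO-8601 start of the request window
    :param end_date: ISO-8601 end of the request window
    :return: JSON response with the result URLs, or None if the request failed
    """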
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes elapsed, polling every 3 seconds
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
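    """
    Open each NetCDF file through the OPeNDAP endpoint and append the requested
    variables, then convert the time vector to pandas timestamps.
    :param nclist: list of NetCDF file paths relative to the THREDDS catalog
    :param variables: structtype of var objects whose .name fields select the data
    :return: the populated variables and the converted time vector
    """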
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist can contain several THREDDS URLs; iterate and append the data from each file
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # strip the fixed URL prefix so the path can be re-rooted on the OPeNDAP endpoint
        dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
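    # time is stored as seconds since 1900-01-01; convert to days for origin-based parsing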
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
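# End-to-end workflow sketch (an illustration only: it assumes valid OOI credentials
# in AUTH, network access, and that M2M_URLs below returns the dataset name together
# with its var_list; the regex tag is a hypothetical example):
#   uframe_dataset_name, var_list = M2M_URLs('CE02SHSM', 'BUOY', 'METBK1', 'Telemetered')
#   data = M2M_Call(uframe_dataset_name, '2019-01-01T00:00:00.000Z', '2019-06-01T00:00:00.000Z')
#   files = M2M_Files(data, '.*METBK.*\\.nc$')
#   variables, time_converted = M2M_Data(files, var_list)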
def M2M_URLs(platform_name,node,instrument_class,method):
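    """
    Build the uframe dataset path and the variable list (names, units, and empty
    data arrays) for a platform / node / instrument class / delivery method combination.
    """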
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
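# VELPT: single-point velocity meter (a Nortek Aquadopp in OOI deployments); east/north/up
# velocities plus heading, roll, pitch, temperature, and pressure in deci-/centi-/milli- units.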
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
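# PCO2W: water-side pCO2 sensor (Sunburst SAMI-pCO2); thermistor temperature and seawater pCO2.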
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
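# PHSEN: seawater pH sensor (Sunburst SAMI-pH); thermistor temperature and pH (dimensionless).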
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
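# SPKIR: multispectral radiometer (Satlantic OCR-507); the downwelling vector carries one
# irradiance value per wavelength channel.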
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
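# PRESF: seafloor pressure (tide) recorder (Sea-Bird SBE 26plus); absolute pressure and
# seawater temperature.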
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
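# CTDBP: pumped CTD (Sea-Bird SBE 16plus V2); temperature, practical salinity, density,
# pressure, and conductivity.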
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
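# VEL3D: single-point turbulence velocimeter (Nortek Vector) on the seafloor
# multi-function node (MFN).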
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
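# VEL3DK: 3-D velocity meter (Nortek Aquadopp II) on the CE09OSPM wire-following profiler;
# attitude is reported alongside the co-located CTD pressure record.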
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
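# PCO2A: air-sea pCO2 system (Pro-Oceanus); paired seawater and atmospheric partial
# pressures plus the derived air-sea CO2 flux.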
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
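# PARAD: photosynthetically available radiation sensor on the wire-following profiler.
# Note the double underscore in 'parad_k__stc' below matches the stream name as registered
# in uFrame; it is not a typo.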
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
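# OPTAA: spectrophotometer (WET Labs AC-S). Only the time base is requested here; the
# multi-wavelength absorption/attenuation arrays are not pulled in these requests.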
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
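# NUTNR: nitrate sensor (Satlantic SUNA); raw and salinity-corrected concentrations.
# The '_recovered' suffix below is part of the uFrame stream name itself, even for the
# telemetered method.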
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
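# MOPAK: buoy-mounted 3-axis motion pack; only the time base is requested from the
# recovered-host accelerometer streams.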
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
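# METBK: bulk meteorology package (ASIMET); surface ocean and atmosphere state plus
# minute-averaged flux products (heat, latent, net longwave, sensible).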
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
	var_list[7].data = np.array([])
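	var_list[8].data = np.array([])
	var_list[9].data = np.array([])
	var_list[10].data = np.array([])
	var_list[11].data = np.array([])
	var_list[12].data = np.array([])
	var_list[13].data = np.array([])
	var_list[14].data = np.array([])
	var_list[15].data = np.array([])
	var_list[16].data = np.array([])
	var_list[17].data = np.array([])
	var_list[18].data = np.array([])
	var_list[0].units = 'seconds since 1900-01-01'
	var_list[1].units = 'degC'
	var_list[2].units = 'S/m'
	var_list[3].units = 'unitless'
	var_list[4].units = 'm/s'
	var_list[5].units = 'm/s'
	var_list[6].units = 'mbar'
	var_list[7].units = 'degC'
	var_list[8].units = '#'
	var_list[9].units = 'W/m'
	var_list[10].units = 'W/m'
	var_list[11].units = 'mm'
	var_list[12].units = 'W/m'
	var_list[13].units = 'W/m'
	var_list[14].units = 'W/m'
	var_list[15].units = 'W/m'
	var_list[16].units = 'm/s'
	var_list[17].units = 'm/s'
	var_list[18].units = 'g/kg'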
import numpy as np
from scipy.optimize import minimize
import math
"""Diagonalize the Jaynes-Cummings ladder of energies"""
def get_order(omega_r, qubit_energy_list):
"""Get the order of eigenenergies that diagonalizer produces.
Use the bare energy of the system as reference.
Args:
omega_r (float): Resonator frequency
qubit_energy_list (List[float]): Qubit energies
Returns:
(List[int]): Indices giving the proper order of the elements in
qubit_energy_list.
"""
tmax = len(qubit_energy_list) # Maximum number of transmon levels
order = np.zeros(tmax)
# Bare energies of the system
diag_bare = np.array([-i*omega_r + qubit_energy_list[i]
for i in range(tmax)])
# Eigenenergies in the order produced by diagonalizer
eigensolver_order = np.linalg.eigvalsh(np.diag(diag_bare))
# Find where the diagonalizer puts the energies
for i in range(tmax):
        index, = np.where(eigensolver_order == diag_bare[i])
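        # Assumed completion (the source is truncated here): record where
        # the diagonalizer placed bare level i.
        order[i] = index[0]
    return order.astype(int)

# Illustrative usage (hypothetical values):
#   order = get_order(omega_r=7.0, qubit_energy_list=[0.0, 5.0, 9.8])
#   eigvals_in_bare_order = eigvals[order]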
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
import torch
import torch.nn as nn
import torchvision.models as models
from model import Unet
from utils.dataloader import read_data_path, MaskDataset
from torch.utils.data import DataLoader
from utils.config import Config
from utils.loss import dice_score
# Hyperparameter
config = Config()
TRAIN_TEST_SPLIT = config.TRAIN_TEST_SPLIT
BATCH_SIZE_VALIDATION = config.BATCH_SIZE_VALIDATION
BATCH_SIZE_TESTING = config.BATCH_SIZE_TESTING
PRED_SAVE_DIR = config.PRED_SAVE_DIR
os.makedirs(PRED_SAVE_DIR, exist_ok=True)
INFERENCE_WEIGHT = config.INFERENCE_WEIGHT
# Use torch cuda
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Import Resnet-50 as base network, modify first layer
model_ft = models.resnet50(pretrained=True)
model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), bias=False)
# Add Residual layer in unet
model = Unet(model_ft)
model.to(device)
if INFERENCE_WEIGHT:
model.load_state_dict(torch.load(INFERENCE_WEIGHT))
# Read data path, make in dataloader
"""
read_data_path
input: (float), the split of train and test
return: (list, list, list), train & valid & test file path list
list -> (img_path, mask_path)
"""
training_list, validation_list, testing_list = read_data_path(TRAIN_TEST_SPLIT)
val_dataset = MaskDataset(validation_list)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE_VALIDATION, shuffle=False, drop_last=True)
# Confusion matrix, positive = abnormal; negative = normal
TP = 0; FP = 0
FN = 0; TN = 0
"""
TP => mask: abnormal, pred: abnormal
FP => mask: normal, pred: abnormal
FN => mask: abnormal, pred: normal
TN => mask: normal, pred: normal
"""
dice_score_list = []
number = 0
with torch.no_grad():
for imgs, masks in val_loader:
imgs_gpu = imgs.to(device)
outputs = model(imgs_gpu)
outputs = torch.round(outputs) * 255
masks = masks.to(device)
# Dice score list
dice_scores = dice_score(outputs, masks)
dice_score_list.extend([dice_scores.item()])
for index in range(BATCH_SIZE_VALIDATION):
img_origin = np.reshape(imgs_gpu[index].cpu().numpy(), (256, 256))
pred_img = np.reshape(outputs[index].cpu().numpy(), (256, 256))
mask_img = np.reshape(masks[index].cpu().numpy()*255, (256, 256))
# Confusion Matrix
if np.sum(mask_img)!=0 and np.sum(pred_img)!=0: TP += 1
if np.sum(mask_img)==0 and np.sum(pred_img)!=0: FP += 1
if np.sum(mask_img)!=0 and np.sum(pred_img)==0: FN += 1
if np.sum(mask_img)==0 and np.sum(pred_img)==0: TN += 1
number += 1
print(number)
            if np.all(mask_img == 0):
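                # The source file is truncated at this point; the block below
                # is an assumed continuation (not from the original) that saves
                # each prediction panel for visual inspection.
                out_name = 'normal_%05d.png' % number
            else:
                out_name = 'abnormal_%05d.png' % number
            panel = np.concatenate([img_origin * 255, pred_img, mask_img], axis=1)
            cv2.imwrite(os.path.join(PRED_SAVE_DIR, out_name), panel)
print('TP=%d FP=%d FN=%d TN=%d' % (TP, FP, FN, TN))
print('mean dice score: %.4f' % float(np.mean(dice_score_list)))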
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
standalone = True
import unittestX as unittest
import journal
debug = journal.debug( "Broadened_E_Q_Kernel_TestCase" )
warning = journal.warning( "Broadened_E_Q_Kernel_TestCase" )
import mcni
from mccomposite import mccompositebp
from mccomponents import mccomponentsbp
class TestCase(unittest.TestCase):
def test(self):
E_Q = "Q*Q/3."
S_Q = "1"
sigma_Q = "Q/2."
Qmin = 0; Qmax = 10
absorption_coefficient = scattering_coefficient = 1.
kernel = mccomponentsbp.create_Broadened_E_Q_Kernel(
E_Q, S_Q, sigma_Q,
Qmin, Qmax,
absorption_coefficient,
scattering_coefficient,
)
ei = 500 # meV
from mcni.utils import conversion
vil = conversion.e2v(ei)
vi = (0,0,vil)
import numpy.linalg as nl
import numpy as np
for i in range(10):
event = mcni.neutron(
r = (0,0,0), v = vi,
prob = 1, time = 0 )
kernel.scatter( event );
vf = np.array(event.state.velocity)
diffv = vi - vf
            Q = conversion.v2k(nl.norm(diffv))
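            # Assumed continuation (the source is truncated here): the energy
            # transfer should scatter around E(Q) = Q*Q/3 with width
            # sigma(Q) = Q/2, as configured for this kernel above.
            ef = conversion.v2e(nl.norm(vf))
            E = ei - ef
            self.assertTrue(abs(E - Q * Q / 3.) < 5 * (Q / 2.))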
import pytest, numbers, warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from scipy.sparse import rand as sprand
from scipy import optimize
from pyuoi import UoI_L1Logistic
from pyuoi.linear_model.logistic import (fit_intercept_fixed_coef,
MaskedCoefLogisticRegression,
LogisticInterceptFitterNoFeatures,
_logistic_regression_path,
_multinomial_loss_grad,
_logistic_loss_and_grad)
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.utils import (compute_class_weight,
check_consistent_length, check_array)
from sklearn.exceptions import ConvergenceWarning
from pyuoi.datasets import make_classification
from pyuoi.lbfgs import fmin_lbfgs, AllZeroLBFGSError
def _logistic_regression_path_old(X, y, Cs=48, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0, coef=None,
class_weight=None, penalty='l2',
multi_class='auto',
check_input=True,
sample_weight=None,
l1_ratio=None, coef_mask=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
    This is the original masking-based implementation, retained here to check
    the new indexing-based version against.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
multi_class : str, {'multinomial', 'auto'}, default: 'auto'
For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'auto' selects binary if the data is binary
and otherwise selects 'multinomial'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
coef_mask : array-like, shape (n_features), (n_classes, n_features) optional
Masking array for coef.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multiclass='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=True)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
if multi_class == 'auto':
if len(classes) > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
coef_size = n_features
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == 1)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight,
classes=mask_classes,
y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
coef_size = classes.size * n_features
lbin = OneHotEncoder(categories=[range(classes.size)], sparse=False)
Y_multi = lbin.fit_transform(y[:, np.newaxis])
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
dtype=X.dtype)
w0[:, -1] = LogisticInterceptFitterNoFeatures(y,
classes.size).intercept_
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
w0[:, :coef.shape[1]] = coef
# Mask initial array
if coef_mask is not None:
if multi_class == 'ovr':
w0[:n_features] *= coef_mask
else:
w0[:, :n_features] *= coef_mask
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
target = Y_multi
if penalty == 'l2':
w0 = w0.ravel()
def func(x, *args):
return _multinomial_loss_grad(x, *args)[0:2]
else:
w0 = w0.T.ravel().copy()
def inner_func(x, *args):
return _multinomial_loss_grad(x, *args)[0:2]
def func(x, g, *args):
x = x.reshape(-1, classes.size).T.ravel()
loss, grad = inner_func(x, *args)
grad = grad.reshape(classes.size, -1).T.ravel()
g[:] = grad
return loss
else:
target = y_bin
if penalty == 'l2':
func = _logistic_loss_and_grad
else:
def func(x, g, *args):
loss, grad = _logistic_loss_and_grad(x, *args)
g[:] = grad
return loss
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
if penalty == 'l2':
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, coef_mask, sample_weight),
iprint=iprint, pgtol=tol, maxiter=max_iter)
else:
zeros_seen = [0]
def zero_coef(x, *args):
if multi_class == 'multinomial':
x = x.reshape(-1, classes.size)[:-1]
else:
x = x[:-1]
now_zeros = np.array_equiv(x, 0.)
if now_zeros:
zeros_seen[0] += 1
else:
zeros_seen[0] = 0
if zeros_seen[0] > 1:
return -2048
try:
w0 = fmin_lbfgs(func, w0, orthantwise_c=1. / C,
args=(X, target, 0., coef_mask, sample_weight),
max_iterations=max_iter,
epsilon=tol,
orthantwise_end=coef_size,
progress=zero_coef)
except AllZeroLBFGSError:
w0 *= 0.
info = None
if info is not None and info["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.", ConvergenceWarning)
# In scipy <= 1.0.0, nit may exceed maxiter.
# See https://github.com/scipy/scipy/issues/7854.
if info is None:
n_iter_i = -1
else:
n_iter_i = min(info['nit'], max_iter)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
if penalty == 'l2':
multi_w0 = np.reshape(w0, (n_classes, -1))
else:
multi_w0 = np.reshape(w0, (-1, n_classes)).T
if coef_mask is not None:
multi_w0[:, :n_features] *= coef_mask
coefs.append(multi_w0.copy())
else:
if coef_mask is not None:
w0[:n_features] *= coef_mask
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
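# Illustrative sanity check (not part of the original file): the old and new
# implementations should agree on a small problem, e.g.
#   X, y, _, _ = make_classification(n_samples=50, n_features=5, random_state=0)
#   new_coefs, _, _ = _logistic_regression_path(X, y, [1.])
#   old_coefs, _, _ = _logistic_regression_path_old(X, y, [1.])
#   assert np.allclose(new_coefs, old_coefs)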
def test_fit_intercept_fixed_coef():
"""Test that the intercept in fit correctly for fixed coefficients."""
X = np.zeros((6, 5))
coef = np.ones((1, 5))
y = np.ones(6, dtype=int)
y[:3] = 0
b = fit_intercept_fixed_coef(X, coef, y, 2)
assert_allclose(b, 0.)
X = np.zeros((7, 5))
y = np.ones(7, dtype=int)
y[:3] = 0
b = fit_intercept_fixed_coef(X, coef, y, 3)
assert_allclose(b.argmax(), 1)
assert_allclose(b.argmin(), 2)
def test_fit_intercept_no_features():
"""Test that the intercept in fit correctly for fixed coefficients."""
X = np.zeros((5, 1))
y = np.ones(6, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 1)
b = LR.intercept_
assert_allclose(b, 0.)
y = np.ones(7, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 1)
yhat = LR.predict(X)
assert_allclose(yhat, 1)
py = LR.predict_proba(X)
assert np.all(py > .5)
y = np.ones(7, dtype=int)
y[:3] = 0
LR = LogisticInterceptFitterNoFeatures(y, 3)
yhat = LR.predict(X)
assert_allclose(yhat, 1)
py = LR.predict_proba(X)
assert_allclose(py.argmax(axis=1), 1)
assert_allclose(py.argmin(axis=1), 2)
def test_l1logistic_intercept():
"""Test that binary L1 Logistic fits an intercept when run."""
for fi in [True, False]:
X, y, w, b = make_classification(n_samples=100,
random_state=11,
n_features=4,
w_scale=4.,
include_intercept=fi)
l1log = UoI_L1Logistic(fit_intercept=fi,
n_boots_sel=3,
n_boots_est=3).fit(X, y)
if not fi:
assert_array_equal(l1log.intercept_, 0.)
else:
l1log.intercept_
def test_l1logistic_binary():
"""Test that binary L1 Logistic runs in the UoI framework."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
l1log = UoI_L1Logistic(random_state=10, fit_intercept=False).fit(X, y)
l1log.predict_proba(X)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8
def test_l1logistic_binary_multinomial():
"""Test that binary L1 Logistic runs in the UoI framework
using multi_class='multinomial'."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
UoI_L1Logistic(random_state=10, multi_class='multinomial').fit(X, y)
UoI_L1Logistic(random_state=10, fit_intercept=False,
multi_class='multinomial').fit(X, y)
def test_l1logistic_no_ovr():
"""Test that binary L1 Logistic model raises an error for
multiclass='ovr'."""
with pytest.raises(ValueError):
UoI_L1Logistic(multi_class='ovr')
def test_l1logistic_multiclass():
"""Test that multiclass L1 Logistic runs in the UoI framework when all
classes share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=True,
w_scale=4.)
l1log = UoI_L1Logistic().fit(X, y)
l1log.predict_proba(X)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8
def test_l1logistic_multiclass_not_shared():
"""Test that multiclass L1 Logistic runs in the UoI framework when all
classes share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=400,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=False,
w_scale=4.)
l1log = UoI_L1Logistic(shared_support=False).fit(X, y)
l1log.predict_log_proba(X)
y_hat = l1log.predict(X)
assert_equal(accuracy_score(y, y_hat), l1log.score(X, y))
assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .7
def test_masked_logistic():
"""Test the masked logistic regression class."""
n_features = 20
n_inf = 10
for shared_support in [True, False]:
for n_classes in [2, 3]:
for intercept in [True, False]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_inf,
n_features=n_features,
shared_support=shared_support,
include_intercept=intercept,
w_scale=4.)
mask = np.squeeze(np.logical_not(np.equal(w, 0)))
for penalty in ['l1', 'l2']:
lr = MaskedCoefLogisticRegression(penalty=penalty, C=10.,
warm_start=True,
fit_intercept=intercept)
lr.fit(X, y, coef_mask=mask)
coef_idxs = np.flatnonzero(np.equal(lr.coef_, 0.))
coef_idxs = set(coef_idxs.tolist())
mask_idxs = np.flatnonzero(np.equal(mask, 0))
mask_idxs = set(mask_idxs.tolist())
assert mask_idxs.issubset(coef_idxs)
lr.fit(X, y, coef_mask=mask)
def test_masked_logistic_standardize():
"""Test the masked logistic regression class with `standardize=True`."""
n_features = 20
n_inf = 10
for shared_support in [True, False]:
for n_classes in [2, 3]:
for intercept in [True, False]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_inf,
n_features=n_features,
shared_support=shared_support,
include_intercept=intercept,
w_scale=4.)
mask = np.squeeze(np.logical_not(np.equal(w, 0)))
for penalty in ['l1', 'l2']:
lr = MaskedCoefLogisticRegression(penalty=penalty, C=10.,
warm_start=True,
fit_intercept=intercept,
standardize=True)
lr.fit(X, y, coef_mask=mask)
coef_idxs = np.flatnonzero(np.equal(lr.coef_, 0.))
coef_idxs = set(coef_idxs.tolist())
mask_idxs = np.flatnonzero(np.equal(mask, 0))
mask_idxs = set(mask_idxs.tolist())
assert mask_idxs.issubset(coef_idxs)
lr.fit(X, y, coef_mask=mask)
@pytest.mark.parametrize("n_classes,penalty,fit_intercept", [(3, "l2", True),
(3, "l2", False),
(3, "l1", True),
(3, "l1", False),
(2, "l2", True),
(2, "l2", False),
(2, "l1", True),
(2, "l1", False)])
def test_masking_with_indexing(n_classes, penalty, fit_intercept):
"""Check that indexing the masks gives the same results as masking with
logistic regression.
"""
X, y, w, intercept = make_classification(n_samples=1000,
n_classes=n_classes,
n_features=20,
n_informative=10,
random_state=0)
mask = w != 0.
if n_classes == 2:
mask = mask.ravel()
coefs, _, _ = _logistic_regression_path(X, y, [10.], coef_mask=mask,
penalty=penalty,
fit_intercept=fit_intercept)
coefs_old, _, _ = _logistic_regression_path_old(X, y, [10.], coef_mask=mask,
penalty=penalty,
fit_intercept=fit_intercept)
assert_allclose(coefs, coefs_old)
coefs, _, _ = _logistic_regression_path(X, y, [10.],
penalty=penalty,
fit_intercept=fit_intercept)
coefs_old, _, _ = _logistic_regression_path_old(X, y, [10.],
penalty=penalty,
fit_intercept=fit_intercept)
assert_allclose(coefs, coefs_old)
@pytest.mark.parametrize("n_classes,penalty,fit_intercept", [(3, "l2", True),
(3, "l2", False),
(3, "l1", True),
(3, "l1", False),
(2, "l2", True),
(2, "l2", False),
(2, "l1", True),
(2, "l1", False)])
def test_all_masked_with_indexing(n_classes, penalty, fit_intercept):
"""Check masking all of the coef either works with intercept or raises an error.
"""
X, y, w, intercept = make_classification(n_samples=1000,
n_classes=n_classes,
n_features=20,
n_informative=10,
random_state=0)
mask = np.zeros_like(w)
if n_classes == 2:
mask = mask.ravel()
coefs, _, _ = _logistic_regression_path(X, y, [10.], coef_mask=mask,
fit_intercept=fit_intercept)
if fit_intercept:
if n_classes == 2:
assert_equal(coefs[0][:-1], 0.)
else:
assert_equal(coefs[0][:, :-1], 0.)
else:
assert_equal(coefs[0], 0.)
def test_estimation_score_usage():
"""Test the ability to change the estimation score in UoI L1Logistic"""
methods = ('acc', 'log', 'BIC', 'AIC', 'AICc')
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=5,
n_features=10)
scores = []
for method in methods:
l1log = UoI_L1Logistic(random_state=12, estimation_score=method,
tol=1e-2, n_boots_sel=24, n_boots_est=24)
assert_equal(l1log.estimation_score, method)
l1log.fit(X, y)
scores.append(l1log.scores_)
scores = np.stack(scores)
assert_equal(len(np.unique(scores, axis=0)), len(methods))
def test_set_random_state():
"""Tests whether random states are handled correctly."""
X, y, w, b = make_classification(n_samples=100,
random_state=60,
n_informative=4,
n_features=5,
w_scale=4.)
# same state
l1log_0 = UoI_L1Logistic(random_state=13)
l1log_1 = UoI_L1Logistic(random_state=13)
l1log_0.fit(X, y)
l1log_1.fit(X, y)
assert_array_equal(l1log_0.coef_, l1log_1.coef_)
# different state
l1log_1 = UoI_L1Logistic(random_state=14)
l1log_1.fit(X, y)
assert not np.array_equal(l1log_0.coef_, l1log_1.coef_)
# different state, not set
l1log_0 = UoI_L1Logistic()
l1log_1 = UoI_L1Logistic()
l1log_0.fit(X, y)
l1log_1.fit(X, y)
assert not np.array_equal(l1log_0.coef_, l1log_1.coef_)
def test_normalization_by_samples():
"""Test that coef_ does not depend directly on the number of samples."""
n_features = 20
for n_classes in [2, 3]:
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=n_classes,
n_informative=n_features,
n_features=n_features,
w_scale=4.)
for penalty in ['l1', 'l2']:
lr1 = MaskedCoefLogisticRegression(penalty=penalty, C=1e2)
lr1.fit(X, y)
lr3 = MaskedCoefLogisticRegression(penalty=penalty, C=1e2)
lr3.fit(np.tile(X, (3, 1)), np.tile(y, 3))
assert_allclose(lr1.coef_, lr3.coef_)
def test_l1logistic_binary_strings():
"""Test that binary L1 Logistic runs in the UoI framework."""
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=6,
n_informative=n_inf,
n_features=20,
w_scale=4.,
include_intercept=True)
classes = ['a', 'b']
lb = LabelEncoder()
lb.fit(classes)
y = lb.inverse_transform(y)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_multiclass_strings():
"""Test that multiclass L1 Logistic runs in the UoI framework when all
classes share a support."""
n_features = 20
n_inf = 10
X, y, w, b = make_classification(n_samples=200,
random_state=10,
n_classes=5,
n_informative=n_inf,
n_features=n_features,
shared_support=True,
w_scale=4.)
classes = ['a', 'b', 'c', 'd', 'e']
lb = LabelEncoder()
lb.fit(classes)
y = lb.inverse_transform(y)
l1log = UoI_L1Logistic(random_state=10).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_sparse_input():
"""Test that multiclass L1 Logistic works when using sparse matrix
inputs"""
rs = np.random.RandomState(17)
X = sprand(100, 100, random_state=rs)
classes = ['abc', 'de', 'fgh']
y = np.array(classes)[rs.randint(3, size=100)]
kwargs = dict(
fit_intercept=False,
random_state=rs,
n_boots_sel=4,
n_boots_est=4,
n_C=7,
)
l1log = UoI_L1Logistic(**kwargs).fit(X, y)
y_hat = l1log.predict(X)
assert set(classes) >= set(y_hat)
def test_l1logistic_sparse_input_no_center():
"""Test that multiclass L1 Logistic raises an error when asked to center
sparse data.
"""
rs = np.random.RandomState(17)
X = sprand(10, 10, random_state=rs)
classes = ['abc', 'de', 'fgh']
y = np.array(classes)[rs.randint(3, size=10)]
with pytest.raises(ValueError):
UoI_L1Logistic(fit_intercept=True).fit(X, y)
def test_l1logistic_bad_est_score():
"""Test that multiclass L1 Logistic raises an error when given a bad
estimation_score value.
"""
    X = np.random.randn(20, 5)
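    # Assumed completion (the source is truncated here): an unrecognized
    # estimation_score should raise a ValueError; 'not_a_score' is a
    # hypothetical bad value.
    y = np.random.randint(2, size=20)
    with pytest.raises(ValueError):
        UoI_L1Logistic(estimation_score='not_a_score').fit(X, y)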
# Run 'source /home/voanna/TimePrediction/src/bash/gpu_caffe_env_variables' before executing this script.
from __future__ import print_function
import os
import time_to_label
import glob
import math
import numpy as np
import scipy.io
import json
import argparse
import HONHelpers as hon
import random
parser = argparse.ArgumentParser()
parser.add_argument("webcam", help="either the name of the webcam you want to use from {} or 'all'".format(hon.webcams), type=str)
parser.add_argument("GPU_ID", help="gpu core to run the caffe training on", type=int)
args = parser.parse_args()
np.random.seed(6)
random.seed(6)
if args.webcam == 'all':
webcams = hon.webcams
else:
assert args.webcam in hon.webcams
webcams = [args.webcam]
CAFFE_PATH = hon.gpu_caffe_root
CAFFE_MODEL = hon.VGG16_caffemodel_path
DATA_ROOT = hon.hon_data_root
EXPERIMENT_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), args.webcam)
training_frac = 0.8
MIN_TEMPERATURE = 5
with open(os.path.join(EXPERIMENT_ROOT, 'train.txt'), 'w') as ftrain, \
open(os.path.join(EXPERIMENT_ROOT, 'val.txt'), 'w') as fval:
for webcam in webcams:
matfile = os.path.join(DATA_ROOT, webcam, 'train_data_aligned.mat')
labels = scipy.io.loadmat(matfile)
labels = labels['y']
        labels = labels[~np.isnan(labels)]
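        # Assumed continuation (the source is truncated here); the frame
        # directory name 'imgs_align' and the "path label" line format are
        # hypothetical:
        imgs = sorted(glob.glob(os.path.join(DATA_ROOT, webcam, 'imgs_align', '*.jpg')))
        labels = np.maximum(labels, MIN_TEMPERATURE)
        pairs = list(zip(imgs, labels))
        random.shuffle(pairs)
        split = int(math.floor(training_frac * len(pairs)))
        for img, label in pairs[:split]:
            ftrain.write('{} {}\n'.format(img, int(label)))
        for img, label in pairs[split:]:
            fval.write('{} {}\n'.format(img, int(label)))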
import struct
import numpy as np
import random
def norm_sgy(data):
    # flatten first: max(max(data)) would only inspect the lexicographically
    # largest trace, not the global extrema of the gather
    flat = [i for j in data for i in j]
    maxval = float(max(flat))
    minval = float(min(flat))
    return [[(float(i)-minval)/(maxval-minval) for i in j] for j in data]
def norm_sgy1(data):
#maxval=max(max(data))
#minval=min(min(data))
# return [[(float(i)-min(j))/float(max(j)-min(j)) for i in j] for j in data]
return [[float(i)/float(max(np.abs(j))) for i in j] for j in data]
def norm_sgy2(data):
maxval=np.max(np.abs(data),axis=1)
index=[i for i in range(len(data))]
return [data[j]/maxval[j] for j in index]
def norm_tgy(data):
# maxval=np.max(np.abs(data),axis=1)
maxval=np.max(1000)
index=[i for i in range(len(data))]
return [data[j]/maxval for j in index]
def norm_ydata(data):
maxval=np.max(np.abs(data),axis=0)
minval=np.min(np.abs(data),axis=0)
# maxval=np.max(data)
# minval=np.min(data)
# return [(j-minval[np.where(j)])/(maxval[np.where(j)]-minval[np.where(j)]) for j in data], maxval, minval
return [j/maxval for j in data], maxval, minval
# return [[(float(i)-float(minval))/float(maxval-minval) for i in j] for j in data]
def read_sgy(sgynam):
# print "sgynam: "+sgynam
try:
binsgy = open(sgynam,'rb')
except IOError:
return 0,0,[]
fhead=binsgy.read(3600);
# print fhead[3213:3215]
nr=struct.unpack(">H",fhead[3212:3214])
    print(nr)
nsmp=struct.unpack(">H",fhead[3220:3222])
print(nsmp)
data = []
for ir in range(0,nr[0]):
trchead=binsgy.read(240)
trcdata=binsgy.read(nsmp[0]*4)
data1 = []
for i in range(0,nsmp[0]):
# print(trcdata[i*4:i*4+4])
data1=data1+list(struct.unpack(">f",trcdata[i*4:i*4+4]))
data.append(data1)
print("read 1sgy end")
binsgy.close()
    return nr,nsmp,data
def read_egy(egynam):
try:
binegy = open(egynam,'rb')
except IOError:
return 0,[]
data=[]
fbinegy=binegy.read()
ndata=len(fbinegy)//4
for i in range(ndata):
data1=[]
data1=struct.unpack("f",fbinegy[i*4:i*4+4])
data.append(data1)
print("read 1egy end")
binegy.close()
return ndata, data
def read_tgy(tgynam):
try:
binegy = open(tgynam,'rb')
except IOError:
return 0,[]
data=[]
fbinegy=binegy.read()
ndata=len(fbinegy)//4
for i in range(ndata):
data1=[]
data1=struct.unpack("f",fbinegy[i*4:i*4+4])
data.append(data1)
print("read 1tgy end")
binegy.close()
return data
def load_data(sgynam='sgy',sgyf1=0,sgyt1=300,sgyf2=0,sgyt2=300,shuffle='true'):
data= []
ydata=[]
for i in range(sgyf1,sgyt1):
print(sgynam+"/event/event%04d.sgy" %(i))
        nr,nsmp,data1 = read_sgy(sgynam+"/event/event%04d.sgy" %(i))
        if nr != 0:
            data1=norm_sgy1(data1)
            data.append(data1)
            ydata.append(1)
        else:
            print('1 event sgy not found')
    for i in range(sgyf2,sgyt2):
        nr,nsmp,data1 = read_sgy(sgynam+"/noise/noise%04d.sgy" %(i))
        if nr != 0:
            data1=norm_sgy1(data1)
            data.append(data1)
            ydata.append(0)
else:
print('1 noise sgy not found')
index=[i for i in range(len(ydata))]
random.seed(7)
if shuffle == 'true':
random.shuffle(index)
data = [data[i] for i in index]
ydata = [ydata[i] for i in index]
data=np.array(data)
ydata=np.array(ydata)
return data.shape[0],(data.shape[1]*data.shape[2]),data,ydata
def load_sgylist(sgylist,floc,shuffle='false'):
data= []
ydata=[]
lines=open(sgylist,'r').readlines()
lines2=open(floc,'r').readlines()
for i in range(0,len(lines)):
egynam=lines[i][:lines[i].find(' ')]
print(egynam)
ndata, data1 = read_egy(egynam)
tgynam=lines2[i][:lines2[i].find(' ')]
print(tgynam)
        data2 = read_tgy(tgynam)
if ndata != 0:
data2=norm_tgy(data2)
data.append(data1)
ydata.append(data2)
else:
print('1 event tgy not found')
index=[i for i in range(len(ydata))]
random.seed(7)
if shuffle == 'true':
random.shuffle(index)
data = [data[i] for i in index]
ydata = [ydata[i] for i in index]
data=np.array(data)
    ydata = np.array(ydata)
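    # Assumed completion mirroring load_data's return signature above:
    return data.shape[0], (data.shape[1] * data.shape[2]), data, ydata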
"""Collection of functions to run forwards and backwards algorithms on haploid genotype data, where the data is structured as samples x variants."""
import numba as nb
import numpy as np
@nb.jit
def forwards_ls_hap(n, m, H, s, e, r, norm=True):
"""Matrix based haploid LS forward algorithm using numpy vectorisation."""
# Initialise
F = np.zeros((n, m))
    c = np.ones(m)
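    # The source is truncated here. A plausible completion (assumed, not from
    # the original) following the standard Li & Stephens forward recursion,
    # with emission probabilities e of shape (m, 2) indexed by match/mismatch:
    F[:, 0] = 1 / n * e[0, np.equal(H[:, 0], s[0, 0]).astype(np.int64)]
    r_n = r / n
    if norm:
        c[0] = np.sum(F[:, 0])
        F[:, 0] *= 1 / c[0]
        for l in range(1, m):
            F[:, l] = F[:, l - 1] * (1 - r[l]) + r_n[l]
            F[:, l] *= e[l, np.equal(H[:, l], s[0, l]).astype(np.int64)]
            c[l] = np.sum(F[:, l])
            F[:, l] *= 1 / c[l]
        ll = np.sum(np.log10(c))
    else:
        for l in range(1, m):
            F[:, l] = F[:, l - 1] * (1 - r[l]) + np.sum(F[:, l - 1]) * r_n[l]
            F[:, l] *= e[l, np.equal(H[:, l], s[0, l]).astype(np.int64)]
        ll = np.log10(np.sum(F[:, m - 1]))
    return F, c, ll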
import unittest
from ancb import NumpyCircularBuffer
from ancb import ( # type: ignore
star_can_broadcast, can_broadcast
)
from numpy import array_equal, allclose, shares_memory
from numpy import array, zeros, arange, ndarray, ones, empty
from numpy.random import rand, randint
from numpy import fill_diagonal, roll
from itertools import zip_longest
from operator import (
matmul, add, sub, mul, truediv, mod, floordiv, pow,
rshift, lshift, and_, or_, xor, neg, pos, abs, inv, invert,
iadd, iand, ifloordiv, ilshift, imod, imul,
ior, ipow, irshift, isub, itruediv, ixor
)
class TestBroadcastability(unittest.TestCase):
def test_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
self.assertTrue(can_broadcast(x.shape, y.shape))
self.assertFalse(can_broadcast(x.shape, z.shape))
self.assertFalse(can_broadcast(y.shape, z.shape))
self.assertTrue(can_broadcast(x.shape, x.shape))
self.assertTrue(can_broadcast(y.shape, y.shape))
self.assertTrue(can_broadcast(z.shape, z.shape))
self.assertTrue(can_broadcast(w.shape, w.shape))
self.assertTrue(can_broadcast(x.shape, w.shape))
self.assertTrue(can_broadcast(y.shape, w.shape))
self.assertTrue(can_broadcast(z.shape, w.shape))
def test_star_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
starexpr = zip_longest(x.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, x.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, z.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(w.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
class OperatorTestFactory(type):
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
bin_operators = [
matmul, add, sub, mul, truediv, mod, floordiv, pow
]
un_operators = [neg, pos, abs, invert, inv]
bitbin_operators = [rshift, lshift, and_, or_, xor]
i_operators = [
iadd, ifloordiv, imul, ipow, isub, itruediv
]
bit_ioperators = [
ilshift, irshift, ior, iand, ixor, imod
]
def unop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = -arange(3, dtype=int)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(-1)
buffer.append(-2)
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # unfrag
buffer.append(-3)
test -= 1
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # frag
return f
def bitbinop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = arange(1, 4, dtype=int)
x = randint(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
return f
def binop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
test = arange(1, 4, dtype=float)
x = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
return f
def iop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
data2 = zeros(3, dtype=float)
test1 = arange(1, 4, dtype=float)
test2 = arange(2, 5, dtype=float)
x = rand(3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer2 + 0, test2))
return f
def bitiop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
data2 = zeros(3, dtype=int)
test1 = arange(1, 4, dtype=int)
test2 = arange(2, 5, dtype=int)
x = randint(low=1, high=100, size=3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(allclose(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(allclose(buffer2 + 0, test2))
return f
for op in bin_operators:
setattr(obj, 'test_{}'.format(op.__name__), binop_testcase(op))
for op in bitbin_operators:
setattr(obj, 'test_{}'.format(op.__name__), bitbinop_testcase(op))
for op in un_operators:
setattr(obj, 'test_{}'.format(op.__name__), unop_testcase(op))
for op in i_operators:
setattr(obj, 'test_{}'.format(op.__name__), iop_testcase(op))
for op in bit_ioperators:
setattr(obj, 'test_{}'.format(op.__name__), bitiop_testcase(op))
return(obj)
class TestNumpyCircularBuffer(
unittest.TestCase, metaclass=OperatorTestFactory
):
"""
NumpyCircularBuffer tests
"""
def test_init(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertTrue(array_equal(data, buffer))
def test_fragmentation(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertFalse(buffer.fragmented)
buffer.append(0)
self.assertFalse(buffer.fragmented)
buffer.append(1)
self.assertFalse(buffer.fragmented)
buffer.append(2)
self.assertFalse(buffer.fragmented)
buffer.append(3)
self.assertTrue(buffer.fragmented)
buffer.append(4)
self.assertTrue(buffer.fragmented)
buffer.append(5)
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
def test_matmul_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(buffer @ C[:1], arange(1) @ C[:1]))
buffer.append(1)
self.assertTrue(allclose(buffer @ C[:2], arange(2) @ C[:2]))
buffer.append(2)
self.assertTrue(allclose(buffer @ C, arange(3) @ C))
buffer.append(3)
self.assertTrue(allclose(buffer @ C, (arange(1, 4)) @ C))
buffer.append(4)
self.assertTrue(allclose(buffer @ C, (arange(2, 5)) @ C))
buffer.append(5)
self.assertTrue(allclose(buffer @ C, (arange(3, 6)) @ C))
buffer.append(6)
self.assertTrue(allclose(buffer @ C, (arange(4, 7)) @ C))
buffer.pop()
self.assertTrue(allclose(buffer @ C[1:], (arange(5, 7)) @ C[1:]))
buffer.pop()
self.assertTrue(allclose(buffer @ C[2:], (arange(6, 7)) @ C[2:]))
def test_matmul_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer @ A
res_b = buffer @ B
res_c = buffer @ C
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = C[:1] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = C[:2] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = C[1:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = C[2:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = A @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(A @ buffer, A @ array([0, 1, 2])))
buffer.append(3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
C = rand(12).reshape(4, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append([9, 10, 11])
test += 3
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_rmatmul_ndnd(self):
data = zeros((3, 3, 3))
A = zeros(27).reshape(3, 3, 3)
B = arange(27).reshape(3, 3, 3)
C = arange(3*8*3).reshape(3, 8, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append(filler + 27)
test += 9
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_matmul2_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(
buffer.matmul(C[:1], empty(1)), arange(1) @ C[:1]
)
)
buffer.append(1)
self.assertTrue(allclose(
buffer.matmul(C[:2], empty(2)), arange(2) @ C[:2]
)
)
buffer.append(2)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3) @ C
)
)
buffer.append(3)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(1, 4) @ C
)
)
buffer.append(4)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(2, 5) @ C
)
)
buffer.append(5)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3, 6) @ C
)
)
buffer.append(6)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(4, 7) @ C
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[1:], empty(2)), arange(5, 7) @ C[1:]
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[2:], empty(1)), arange(6, 7) @ C[2:]
)
)
def test_matmul2_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul2_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul2_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
        filler = arange(9).reshape(3, 3)
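        # Assumed completion mirroring test_matmul_ndnd above, exercising the
        # out-parameter form buffer.matmul(X, out):
        buffer.append(filler)
        buffer.append(filler + 9)
        buffer.append(filler + 18)
        test = arange(27).reshape(3, 3, 3)
        res_a = buffer.matmul(A, empty((3, 3, 3)))
        res_b = buffer.matmul(B, empty((3, 3, 3)))
        self.assertIsInstance(res_a, ndarray)
        self.assertIsInstance(res_b, ndarray)
        self.assertTrue(array_equal(res_a, test @ A))
        self.assertTrue(allclose(res_b, test @ B))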
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import itertools
import operator
import unittest
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax import core
from jax._src import dtypes
from jax import lax
from jax._src import test_util as jtu
from jax import tree_util
from jax._src import lax_reference
from jax.test_util import check_grads
import jax.util
from jax._src.util import prod
from jax._src.lax.lax import _device_put_raw
from jax.config import config
config.parse_flags_with_absl()
### lax tests
# For standard unops and binops, we can generate a large number of tests on
# arguments of appropriate shapes and dtypes using the following table.
float_dtypes = jtu.dtypes.all_floating
complex_elem_dtypes = jtu.dtypes.floating
complex_dtypes = jtu.dtypes.complex
inexact_dtypes = jtu.dtypes.all_inexact
int_dtypes = jtu.dtypes.all_integer
uint_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
all_dtypes = float_dtypes + complex_dtypes + int_dtypes + uint_dtypes + bool_dtypes
python_scalar_types = [bool, int, float, complex]
compatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]
# We check cases where the preferred type is at least as wide as the input
# type and where both are either both floating-point or both integral,
# which are the only supported configurations.
preferred_type_combinations = [
(np.float16, np.float16), (np.float16, np.float32), (np.float16, np.float64),
(dtypes.bfloat16, dtypes.bfloat16), (dtypes.bfloat16, np.float32),
(dtypes.bfloat16, np.float64), (np.float32, np.float32), (np.float32, np.float64),
(np.float64, np.float64), (np.int8, np.int8), (np.int8, np.int16), (np.int8, np.int32),
(np.int8, np.int64), (np.int16, np.int16), (np.int16, np.int32), (np.int16, np.int64),
(np.int32, np.int32), (np.int32, np.int64), (np.int64, np.int64),
(np.complex64, np.complex64), (np.complex64, np.complex128), (np.complex128, np.complex128)]
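# For example, (np.float16, np.float64) is a valid pairing because float64 can
# represent every float16 value exactly, whereas a pairing like
# (np.int32, np.float32) is excluded because the kinds differ.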
OpRecord = collections.namedtuple(
"OpRecord", ["op", "nargs", "dtypes", "rng_factory", "tol"])
def op_record(op, nargs, dtypes, rng_factory, tol=None):
return OpRecord(op, nargs, dtypes, rng_factory, tol)
LAX_OPS = [
op_record("neg", 1, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("sign", 1, default_dtypes + uint_dtypes, jtu.rand_small),
op_record("floor", 1, float_dtypes, jtu.rand_small),
op_record("ceil", 1, float_dtypes, jtu.rand_small),
op_record("round", 1, float_dtypes, jtu.rand_default),
op_record("nextafter", 2, [f for f in float_dtypes if f != dtypes.bfloat16],
jtu.rand_default, tol=0),
op_record("is_finite", 1, float_dtypes, jtu.rand_small),
op_record("exp", 1, float_dtypes + complex_dtypes, jtu.rand_small),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, float_dtypes + complex_dtypes, jtu.rand_small,
{np.float64: 1e-8}),
op_record("log", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("log1p", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, float_dtypes + complex_dtypes, jtu.rand_small,
{np.float64: 1e-9, np.complex128: 1e-7}),
op_record("sin", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("cos", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("atan2", 2, float_dtypes, jtu.rand_default),
op_record("sqrt", 1, float_dtypes, jtu.rand_positive),
op_record("sqrt", 1, complex_dtypes, jtu.rand_default),
op_record("rsqrt", 1, float_dtypes, jtu.rand_positive),
op_record("rsqrt", 1, complex_dtypes, jtu.rand_default),
op_record("cbrt", 1, float_dtypes, jtu.rand_default),
op_record("square", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("reciprocal", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("tan", 1, float_dtypes + complex_dtypes, jtu.rand_default, {np.float32: 3e-5}),
op_record("asin", 1, float_dtypes + complex_dtypes, jtu.rand_small),
op_record("acos", 1, float_dtypes + complex_dtypes, jtu.rand_small),
op_record("atan", 1, float_dtypes + complex_dtypes, jtu.rand_small),
op_record("asinh", 1, float_dtypes + complex_dtypes, jtu.rand_default,
tol={np.complex64: 1E-4, np.complex128: 1E-5}),
op_record("acosh", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
# TODO(b/155331781): atanh has only ~float32 precision
op_record("atanh", 1, float_dtypes + complex_dtypes, jtu.rand_small, {np.float64: 1e-9}),
op_record("sinh", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("cosh", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("lgamma", 1, float_dtypes, jtu.rand_positive,
{np.float32: 1e-3 if jtu.device_under_test() == "tpu" else 1e-5,
np.float64: 1e-14}),
op_record("digamma", 1, float_dtypes, jtu.rand_positive,
{np.float64: 1e-14}),
op_record("betainc", 3, float_dtypes, jtu.rand_positive,
{np.float64: 1e-14}),
op_record("igamma", 2,
[f for f in float_dtypes if f not in [dtypes.bfloat16, np.float16]],
jtu.rand_positive, {np.float64: 1e-14}),
op_record("igammac", 2,
[f for f in float_dtypes if f not in [dtypes.bfloat16, np.float16]],
jtu.rand_positive, {np.float64: 1e-14}),
op_record("erf", 1, float_dtypes, jtu.rand_small),
op_record("erfc", 1, float_dtypes, jtu.rand_small),
# TODO(b/142976030): the approximation of erf_inv used by XLA is only
# accurate to float32 precision.
op_record("erf_inv", 1, float_dtypes, jtu.rand_small,
{np.float64: 1e-9}),
op_record("bessel_i0e", 1, float_dtypes, jtu.rand_default),
op_record("bessel_i1e", 1, float_dtypes, jtu.rand_default),
op_record("real", 1, complex_dtypes, jtu.rand_default),
op_record("imag", 1, complex_dtypes, jtu.rand_default),
op_record("complex", 2, complex_elem_dtypes, jtu.rand_default),
op_record("conj", 1, complex_elem_dtypes + complex_dtypes,
jtu.rand_default),
op_record("abs", 1, default_dtypes + complex_dtypes, jtu.rand_default),
op_record("pow", 2, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("bitwise_and", 2, bool_dtypes, jtu.rand_small),
op_record("bitwise_not", 1, bool_dtypes, jtu.rand_small),
op_record("bitwise_or", 2, bool_dtypes, jtu.rand_small),
op_record("bitwise_xor", 2, bool_dtypes, jtu.rand_small),
op_record("population_count", 1, int_dtypes + uint_dtypes, jtu.rand_int),
op_record("clz", 1, int_dtypes + uint_dtypes, jtu.rand_int),
op_record("add", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("sub", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("mul", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("div", 2, default_dtypes + complex_dtypes, jtu.rand_nonzero),
op_record("rem", 2, default_dtypes, jtu.rand_nonzero),
op_record("max", 2, all_dtypes, jtu.rand_small),
op_record("min", 2, all_dtypes, jtu.rand_small),
op_record("eq", 2, all_dtypes, jtu.rand_some_equal),
op_record("ne", 2, all_dtypes, jtu.rand_small),
op_record("ge", 2, default_dtypes, jtu.rand_small),
op_record("gt", 2, default_dtypes, jtu.rand_small),
op_record("le", 2, default_dtypes, jtu.rand_small),
op_record("lt", 2, default_dtypes, jtu.rand_small),
]
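# Each record above is expanded into concrete test cases below. As a rough
# sketch of what one expansion does (shapes and the rng seed are illustrative,
# not taken from the suite):
#
#   rec = op_record("add", 2, default_dtypes + complex_dtypes, jtu.rand_small)
#   rng = rec.rng_factory(np.random.RandomState(0))
#   args = [rng((3, 4), np.float32), rng((3, 4), np.float32)]
#   lax.add(*args)  # compiled and checked against eager execution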
class LaxTest(jtu.JaxTestCase):
"""Numerical tests for LAX operations."""
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.op, shapes, itertools.repeat(dtype)),
"op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
"dtype": dtype}
for shape_group in compatible_shapes
for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)
for dtype in rec.dtypes)
for rec in LAX_OPS))
def testOp(self, op_name, rng_factory, shapes, dtype):
rng = rng_factory(self.rng())
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.op, shapes, itertools.repeat(dtype)),
"op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
"dtype": dtype, "tol": rec.tol}
for shape_group in compatible_shapes
for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)
for dtype in rec.dtypes)
for rec in LAX_OPS))
def testOpAgainstNumpy(self, op_name, rng_factory, shapes, dtype, tol):
if (not config.x64_enabled and op_name == "nextafter"
and dtype == np.float64):
raise SkipTest("64-bit mode disabled")
rng = rng_factory(self.rng())
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
numpy_op = getattr(lax_reference, op_name)
self._CheckAgainstNumpy(numpy_op, op, args_maker, tol=tol)
# TODO test shift_left, shift_right_arithmetic, shift_right_logical
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}_weak_type={}".format(
from_dtype, to_dtype, weak_type),
"from_dtype": from_dtype, "to_dtype": to_dtype, "weak_type": weak_type}
for from_dtype, to_dtype in itertools.product(
[None, np.float32, np.int32, "float32", "int32"], repeat=2)
for weak_type in [True, False]))
def testConvertElementType(self, from_dtype, to_dtype, weak_type):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax._convert_element_type(x, to_dtype, weak_type)
self._CompileAndCheck(op, args_maker)
x = rng((1,), from_dtype)
out = op(x)
self.assertEqual(out.dtype, dtypes.canonicalize_dtype(to_dtype or x.dtype))
self.assertEqual(out.aval.weak_type, weak_type)
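# Note: weak_type marks a value as carrying a Python-scalar-like type that may
# be implicitly promoted; the private lax._convert_element_type used above is
# how the tests attach that flag, since the public convert_element_type
# produces strongly-typed results.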
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype}
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)))
def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.convert_element_type(x, to_dtype)
numpy_op = lambda x: lax_reference.convert_element_type(x, to_dtype)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype}
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)))
def testBitcastConvertType(self, from_dtype, to_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype}
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)))
def testBitcastConvertTypeAgainstNumpy(self, from_dtype, to_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
numpy_op = lambda x: lax_reference.bitcast_convert_type(x, to_dtype)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}_weak_type={}"
.format(from_dtype, to_dtype, weak_type),
"from_dtype": from_dtype, "to_dtype": to_dtype, "weak_type": weak_type}
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)
for weak_type in [True, False]))
def testBitcastConvertWeakType(self, from_dtype, to_dtype, weak_type):
rng = jtu.rand_default(self.rng())
x_in = lax._convert_element_type(rng((2, 3), from_dtype),
weak_type=weak_type)
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self.assertEqual(dtypes.is_weakly_typed(x_in), weak_type)
x_out = op(x_in)
self.assertEqual(dtypes.is_weakly_typed(x_out), False)
x_out_jit = jax.jit(op)(x_in)
self.assertEqual(dtypes.is_weakly_typed(x_out_jit), False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format(
jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype)),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype}
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]
for dtype in default_dtypes))
def testClamp(self, min_shape, operand_shape, max_shape, dtype):
rng = jtu.rand_default(self.rng())
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CompileAndCheck(lax.clamp, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format(
jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype)),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype}
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]
for dtype in default_dtypes))
def testClampAgainstNumpy(self, min_shape, operand_shape, max_shape, dtype):
rng = jtu.rand_default(self.rng())
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CheckAgainstNumpy(lax_reference.clamp, lax.clamp, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format(
dim, ",".join(str(d) for d in base_shape), np.dtype(dtype).name,
num_arrs),
"dim": dim, "base_shape": base_shape, "dtype": dtype, "num_arrs": num_arrs}
for num_arrs in [3]
for dtype in default_dtypes
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))))
def testConcatenate(self, dim, base_shape, dtype, num_arrs):
rng = jtu.rand_default(self.rng())
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format(
dim, ",".join(str(d) for d in base_shape), np.dtype(dtype).name,
num_arrs),
"dim": dim, "base_shape": base_shape, "dtype": dtype, "num_arrs": num_arrs}
for num_arrs in [3]
for dtype in default_dtypes
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))))
def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs):
rng = jtu.rand_default(self.rng())
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
numpy_op = lambda *args: lax_reference.concatenate(args, dim)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in ["VALID", "SAME"]))
def testConv(self, lhs_shape, rhs_shape, dtype, strides, padding):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv(lhs, rhs, strides, padding)
self._CompileAndCheck(fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_preferred_element_type={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
preferred_element_type.__name__),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"preferred_element_type": preferred_element_type}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype, preferred_element_type in preferred_type_combinations))
def testConvPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):
if (not config.x64_enabled and
(dtype == np.float64 or preferred_element_type == np.float64
or dtype == np.int64 or preferred_element_type == np.int64
or dtype == np.complex128 or preferred_element_type == np.complex128)):
raise SkipTest("64-bit mode disabled")
if jtu.device_under_test() == "gpu" and np.issubdtype(dtype, np.integer):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
raise SkipTest("Integer convolution not yet supported on GPU")
if (jtu.device_under_test() == "tpu" and
(dtype == np.complex128 or preferred_element_type == np.complex128)):
raise SkipTest("np.complex128 is not yet supported on TPU")
# x64 implementation is only accurate to ~float32 precision for this case.
if dtype == np.complex64 and preferred_element_type == np.complex128:
tol = 1e-5
else:
tol = {np.float64: 1e-14}
rng = jtu.rand_default(self.rng())
x = rng(lhs_shape, dtype)
y = rng(rhs_shape, dtype)
# We first compute the conv when both inputs are a lower-precision type and
# preferred_element_type is a higher-precision type. We then compute results
# where the inputs are first upcast to the higher-precision type and no
# `preferred_element_type` is given. We expect the result to be extremely
# similar given the semantics of `preferred_element_type`.
result_with_preferred_type = lax.conv(
x, y, (1, 1), "VALID",
preferred_element_type=preferred_element_type)
result_with_upcast_inputs = lax.conv(
x.astype(preferred_element_type),
y.astype(preferred_element_type),
(1, 1), "VALID")
self.assertArraysAllClose(
result_with_preferred_type, result_with_upcast_inputs, rtol=tol, atol=tol)
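# A minimal usage sketch of the semantics tested above (values assumed):
#
#   lhs = np.ones((2, 3, 9, 10), np.float16)
#   rhs = np.ones((3, 3, 4, 5), np.float16)
#   out = lax.conv(lhs, rhs, (1, 1), "VALID",
#                  preferred_element_type=np.float32)
#   out.dtype  # float32: accumulation and output use the wider type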
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in ["VALID", "SAME"]))
def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
op = lambda lhs, rhs: lax.conv(lhs, rhs, strides, padding)
numpy_op = lambda lhs, rhs: lax_reference.conv(lhs, rhs, strides, padding)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]
for lhs_dilation, rhs_dilation in itertools.product(
[(1, 1), (1, 2), (2, 2)], repeat=2)))
def testConvWithGeneralPadding(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CompileAndCheck(fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)]
for dtype in [np.float32]
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]
for lhs_dilation, rhs_dilation in itertools.product(
[(1, 1), (1, 2), (2, 2)], repeat=2)))
def testConvWithGeneralPaddingAgainstNumpy(
self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,
rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,
precision=lax.Precision.HIGHEST)
def numpy_fun(lhs, rhs):
return lax_reference.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CheckAgainstNumpy(numpy_fun, fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}"
"_dims={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation,
",".join(dim_nums)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation, "dimension_numbers": dim_nums,
"feature_group_count": feature_group_count,
"batch_group_count": batch_group_count, "perms": perms
} for batch_group_count, feature_group_count in s([(1, 1), (2, 1), (1, 2)])
for lhs_shape, rhs_shape in s([
((b * batch_group_count, i * feature_group_count, 9, w),
(j * feature_group_count * batch_group_count, i, 4, 5))
for w in [0, 10]
for b, i, j in itertools.product([2, 3], repeat=3)])
for dtype in s(all_dtypes)
for strides in s([(1, 1), (2, 1)])
for padding in s([((1, 2), (2, 0)), ((10, 8), (7, 13))])
for lhs_dilation, rhs_dilation in s(itertools.product(
[(1, 1), (1, 2), (1, 4)], repeat=2))
for dim_nums, perms in s([
(("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
(("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
(("NCHW", "HWIO", "NHWC"), ([0, 1, 2, 3], [2, 3, 1, 0])),
]))))
def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation,
feature_group_count, batch_group_count,
dimension_numbers, perms):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.device_under_test() == "gpu":
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs_perm, rhs_perm = perms # permute to compatible shapes
def args_maker():
return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),
lax.transpose(rng(rhs_shape, dtype), rhs_perm)]
def fun(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count)
self._CompileAndCheck(fun, args_maker)
def testConvGeneralDilatedPatchesOverlapping1D(self):
lhs = np.array([[1]], np.float32).reshape((1, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(),
window_strides=(),
padding='SAME'
)
self.assertAllClose(lhs, patches)
dn = ('NHC', 'OIH', 'NHC')
lhs = np.array([1, 2, 3, 4, 5], np.float32).reshape((1, -1, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(2,),
window_strides=(2,),
padding='VALID',
dimension_numbers=dn
)
self.assertAllClose(
np.array([[1, 2],
[3, 4]], np.float32).reshape((1, 2, 2)), patches)
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(3,),
window_strides=(1,),
padding='SAME',
dimension_numbers=dn
)
self.assertAllClose(
np.array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 0]], np.float32).reshape((1, 5, 3)), patches)
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(3,),
window_strides=(1,),
padding='SAME',
rhs_dilation=(2,),
dimension_numbers=dn
)
self.assertAllClose(
np.array([[0, 1, 3],
[0, 2, 4],
[1, 3, 5],
[2, 4, 0],
[3, 5, 0]], np.float32).reshape((1, 5, 3)), patches)
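# In the assertions above, each output position holds the flattened receptive
# field read under the given stride/dilation, with zeros where 'SAME' padding
# extends past the input edges.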
def testConvGeneralDilatedPatchesOverlapping2D(self):
lhs = np.array([[1, 2, 3],
[4, 5, 6]], np.float32).reshape((1, 2, 3, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(2, 2),
window_strides=(1, 1),
padding='SAME',
dimension_numbers=('NHWC', 'OIHW', 'NHWC')
)
self.assertAllClose(np.array([[1, 2, 4, 5],
[2, 3, 5, 6],
[3, 0, 6, 0],
[4, 5, 0, 0],
[5, 6, 0, 0],
[6, 0, 0, 0]],
np.float32).reshape((1, 2, 3, 4)), patches)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_filter_shape={}_strides={}_padding={}"
"_dims={}_precision={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(filter_shape, dtype),
strides,
padding,
"None" if dim_nums is None else ",".join(dim_nums),
precision
),
"lhs_shape": lhs_shape,
"filter_shape": filter_shape,
"dtype": dtype,
"strides": strides,
"padding": padding,
"dimension_numbers": dim_nums,
"precision": precision
}
for dtype in all_dtypes
for lhs_shape, filter_shape, strides, padding, dim_nums in [
((2, 5), (), (), [], ("NC", "OI", "CN")),
((2, 3, 4), (2,), (2,), [(0, 2)], ("CNH", "OHI", "HNC")),
((3, 1, 4, 5), (1, 3), (1, 3), [(3, 1), (2, 2)],
("NCHW", "OIHW", "NCHW")),
((3, 2, 5, 6), (4, 3), (4, 3), [(5, 2), (2, 4)],
None),
((1, 2, 3, 4), (1, 1), (1, 1), [(0, 0), (0, 0)],
("NCWH", "OHWI", "CNHW")),
((1, 2, 3, 4), (3, 2), (1, 1), [(0, 0), (0, 0)],
("CWHN", "HOWI", "NCHW")),
((2, 3, 4, 5, 6), (2, 1, 3), (2, 1, 3), [(1, 2), (5, 3), (3, 5)],
("NHWDC", "HDIWO", "DCWNH"))
]
for precision in [None,
lax.Precision.DEFAULT,
lax.Precision.HIGH,
lax.Precision.HIGHEST]
))
def testConvGeneralDilatedPatchesNonOverlapping(self,
lhs_shape,
filter_shape,
dtype,
strides,
padding,
dimension_numbers,
precision):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.device_under_test() == "gpu":
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs = rng(lhs_shape, dtype)
if dimension_numbers is None:
lhs_spec, rhs_spec, out_spec = "NCHW", "OIHW", "NCHW"
else:
lhs_spec, rhs_spec, out_spec = dimension_numbers
filter_spec = ''.join(c for c in rhs_spec if c not in ('I', 'O'))
patches_spec = out_spec.replace('C', 'C' + filter_spec.lower())
full_padding = []
for c in lhs_spec:
if c in ('N', 'C'):
full_padding += [(0, 0)]
else:
full_padding += [padding[filter_spec.index(c)]]
lhs_padded = np.pad(lhs, full_padding, 'constant')
out = lax.transpose(lhs_padded, [lhs_spec.index(c) for c in out_spec])
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=filter_shape,
window_strides=strides,
padding=padding,
dimension_numbers=dimension_numbers,
precision=precision
)
source = []
# Test that output spatial shape is factored into `#patches x patch_size`.
for c in out_spec:
out_c = out.shape[out_spec.index(c)]
patch_c = patches.shape[out_spec.index(c)]
if c == 'N':
self.assertEqual(out_c, patch_c)
elif c == 'C':
self.assertEqual(out_c * np.prod(filter_shape), patch_c)
else:
self.assertEqual(out_c, patch_c * filter_shape[filter_spec.index(c)])
source += [patches_spec.index(c), patches_spec.index(c.lower())]
# Test that stacking patches together gives the source image, padded.
c = out_spec.index('C')
patches = patches.reshape(patches.shape[:c] +
(lhs_shape[lhs_spec.index('C')],) +
filter_shape +
patches.shape[c + 1:]
)
patches = np.moveaxis(patches, source, range(len(source)))
for i in range(len(filter_shape)):
patches = patches.reshape(patches.shape[:i] + (-1,) +
patches.shape[2 + i:])
patches = np.moveaxis(
patches,
range(len(filter_shape)),
[out_spec.index(c) for c in out_spec if c not in ('N', 'C')])
self.assertAllClose(out, patches)
# TODO(mattjj): test conv_general_dilated against numpy
def testConv0DIsDot(self):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng((10, 5), np.float32), rng((5, 7), np.float32)]
jnp_fun = partial(lax.conv_general_dilated, window_strides=(),
padding='VALID', dimension_numbers=('NC', 'IO', 'NC'))
self._CompileAndCheck(jnp_fun, args_maker)
self._CheckAgainstNumpy(np.dot, jnp_fun, args_maker, tol=.1)
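# With no spatial dimensions, conv_general_dilated reduces to a plain
# contraction over the channel axis, i.e. roughly:
#
#   lax.conv_general_dilated(x, w, window_strides=(), padding='VALID',
#                            dimension_numbers=('NC', 'IO', 'NC'))
#   # ~= x @ w  for x of shape (N, C_in) and w of shape (C_in, C_out)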
def testGradConv0D(self):
# Reproduces a failure in neural_tangents not caught in our presubmit tests
# See cl/367416742.
lhs = np.ones((2, 5), dtype=np.float32)
rhs = np.ones((5, 10), dtype=np.float32)
def f_jax(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, window_strides=(),
padding=(), lhs_dilation=(), rhs_dilation=(),
dimension_numbers=lax.ConvDimensionNumbers((0, 1), (1, 0), (0, 1)),
batch_group_count=1, feature_group_count=1, precision=None,
preferred_element_type=None)
res, pullback = jax.vjp(f_jax, lhs, rhs)
grad = pullback(np.ones_like(res))
self.assertAllClose((lhs * 10., rhs * 2.), grad)
@staticmethod
def _conv_transpose_via_grad(data, kernel, strides, padding,
rhs_dilation=None, dimension_numbers=None):
"""Helper method: calculates conv transpose via grad for testing."""
assert len(data.shape) == len(kernel.shape)
nspatial = len(data.shape) - 2
one = (1,) * nspatial
rhs_dilation = rhs_dilation or one
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
in_shape = np.take(data.shape, dn.lhs_spec)
in_sdims = in_shape[2:]
k_shape = np.take(kernel.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]
if padding == 'VALID':
o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)
for i in range(nspatial)]
elif padding == 'SAME':
o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]
o_shape = [in_shape[0], k_shape[1]] + o_sdims
out_spec_inv = [x[0] for x in
sorted(enumerate(dn.out_spec), key=lambda x: x[1])]
o_layout = np.take(np.array(o_shape), out_spec_inv)
placeholder = np.ones(o_layout, data.dtype)
conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,
one, rhs_dilation, dn)
_, g = jax.vjp(conv, placeholder)
return g(data)[0]
@staticmethod
def _transpose_conv_kernel(data, kernel, dimension_numbers):
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
spatial_axes = np.array(dn.rhs_spec)[2:]
for axis in spatial_axes:
kernel = np.flip(kernel, axis)
kernel = np.swapaxes(kernel, dn.rhs_spec[0], dn.rhs_spec[1])
return kernel
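# The helper above produces the kernel used by the transpose convolution:
# spatial axes are flipped and the input/output channel axes are swapped,
# which is exactly the kernel transformation that appears in the lhs-gradient
# of a forward convolution.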
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"dspec": dspec}
for lhs_shape, rhs_shape in [
((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
for padding in ["VALID", "SAME"]
for dspec in [('NHWC', 'HWIO', 'NHWC'),]
for rhs_dilation in [None, (2, 2)]))
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
# NB: this test calculates conv_transpose performing identically to the
# lhs-grad of conv.
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec,
transpose_kernel=True)
def fun_via_grad(lhs, rhs):
return self._conv_transpose_via_grad(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"dspec": dspec}
for lhs_shape, rhs_shape in [
((b, 9, 10, i), (k, k, i, j))
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
for padding in ["VALID", "SAME"]
for dspec in [('NHWC', 'HWIO', 'NHWC'),]
for rhs_dilation in [None, (2, 2)]))
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec,
transpose_kernel=False)
def fun_via_grad(lhs, rhs):
rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"dspec": dspec}
for lhs_shape, rhs_shape in [
((b, 10, i), (k, i, j))
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1,), (2,), (3,)]
for padding in ["VALID", "SAME"]
for dspec in [('NHC', 'HIO', 'NHC'),]
for rhs_dilation in [None, (2,)]))
def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
dimension_numbers=dspec,
rhs_dilation=rhs_dilation,
transpose_kernel=False)
def fun_via_grad(lhs, rhs):
rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"dspec": dspec}
for lhs_shape, rhs_shape in [
((b, i), (i, j))
for b, i, j in itertools.product([2,3],[2,3],[2,3])]
for dtype in float_dtypes
for strides in [()]
for padding in ["VALID", "SAME"]
for dspec in [('NC', 'IO', 'NC'),]
for rhs_dilation in [None, ()]))
def testConvTranspose0D(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
dimension_numbers=dspec,
rhs_dilation=rhs_dilation,
transpose_kernel=False)
def fun_via_grad(lhs, rhs):
rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
def testConvTransposePaddingList(self):
# Regression test for https://github.com/google/jax/discussions/8695
a = jnp.ones((28,28))
b = jnp.ones((3,3))
c = lax.conv_general_dilated(a[None, None], b[None, None], (1,1), [(0,0),(0,0)], (1,1))
self.assertArraysEqual(c, 9 * jnp.ones((1, 1, 26, 26)))
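# Sanity of the expected value: a 3x3 all-ones kernel over an all-ones 28x28
# image with zero padding has 26x26 valid positions, each summing 9 inputs.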
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_precision={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
precision),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"precision": precision}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for dtype in all_dtypes
for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,
lax.Precision.HIGHEST,
(lax.Precision.DEFAULT, lax.Precision.HIGHEST)]))
def testDot(self, lhs_shape, rhs_shape, dtype, precision):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_preferred_element_type={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
jtu.format_shape_dtype_string((), preferred_element_type)
),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "preferred_element_type": preferred_element_type
}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for dtype, preferred_element_type in preferred_type_combinations))
def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):
if (not config.x64_enabled and
(dtype == np.float64 or preferred_element_type == np.float64
or dtype == np.int64 or preferred_element_type == np.int64)):
raise SkipTest("64-bit mode disabled")
if (jtu.device_under_test() == "tpu" and
(dtype == np.complex128 or preferred_element_type == np.complex128)):
raise SkipTest("np.complex128 is not yet supported on TPU")
if jtu.device_under_test() == "gpu":
# TODO(b/189287598)
raise SkipTest("dot_general with preferred_element_type returns NaN non-deterministically on GPU")
rng = jtu.rand_default(self.rng())
x = rng(lhs_shape, dtype)
y = rng(rhs_shape, dtype)
# We first compute the dot when both inputs are a lower-precision type and
# preferred_element_type is a higher-precision type. We then compute results
# where the inputs are first upcast to the higher-precision type and no
# `preferred_element_type` is given. We expect the result to be extremely
# similar given the semantics of `preferred_element_type`.
result_with_preferred_type = lax.dot(x, y, preferred_element_type=preferred_element_type)
result_with_upcast_inputs = lax.dot(
x.astype(preferred_element_type),
y.astype(preferred_element_type))
self.assertArraysAllClose(result_with_preferred_type, result_with_upcast_inputs)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for dtype in all_dtypes))
def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
tol = {
np.float16: 1e-2,
np.float64: max(jtu.default_tolerance()[np.dtype(np.float64)], 1e-14),
np.complex128: max(jtu.default_tolerance()[np.dtype(np.complex128)],
1e-14)
}
lax_op = partial(lax.dot, precision=lax.Precision.HIGHEST)
self._CheckAgainstNumpy(lax_reference.dot, lax_op, args_maker, tol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lhs_contracting, rhs_contracting),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting}
for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
[(5,), (5,), [0], [0]],
[(5, 7), (5,), [0], [0]],
[(7, 5), (5,), [1], [0]],
[(3, 5), (2, 5), [1], [1]],
[(5, 3), (5, 2), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0,2], [0,1]],
[(5, 3, 2), (3, 5, 2, 4), [0,2], [1,2]],
[(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],
[(3, 2), (2, 4), [1], [0]],
]
for dtype in all_dtypes))
def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,
lhs_contracting, rhs_contracting):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
def fun(lhs, rhs):
return lax.dot_general(lhs, rhs, dimension_numbers)
self._CompileAndCheck(fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),
((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
]
for dtype in all_dtypes))
def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,
dimension_numbers):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.dot_general(lhs, rhs, dimension_numbers)
self._CompileAndCheck(fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),
((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
]
for dtype in all_dtypes))
def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype,
dimension_numbers):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
op = lambda x, y: lax.dot_general(x, y, dimension_numbers)
numpy_op = lambda x, y: lax_reference.dot_general(x, y, dimension_numbers)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}".format(
shape, np.dtype(dtype).name, broadcast_sizes),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes}
for shape in [(), (2, 3)]
for dtype in default_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]))
def testBroadcast(self, shape, dtype, broadcast_sizes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.broadcast(x, broadcast_sizes)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_broadcast_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), broadcast_sizes),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes}
for shape in [(), (2, 3)]
for dtype in default_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]))
def testBroadcastAgainstNumpy(self, shape, dtype, broadcast_sizes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.broadcast(x, broadcast_sizes)
numpy_op = lambda x: lax_reference.broadcast(x, broadcast_sizes)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
([1], [2, 3], [1]),
]
for dtype in default_dtypes))
def testBroadcastInDim(self, inshape, dtype, outshape, dimensions):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(inshape, dtype)]
op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
self._CompileAndCheck(op, args_maker)
def testBroadcastInDimOperandShapeTranspose(self):
# Regression test for https://github.com/google/jax/issues/5276
def f(x):
return lax.broadcast_in_dim(x, (2, 3, 4), broadcast_dimensions=(0, 1, 2)).sum()
def g(x):
return lax.broadcast_in_dim(x.reshape((3,)), (2, 3, 4), broadcast_dimensions=(1,)).sum()
x = np.ones((1, 3, 1))
self.assertArraysEqual(jax.grad(f)(x), jax.grad(g)(x))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, np.float32),
outshape, broadcast_dimensions),
"inshape": inshape, "outshape": outshape,
"broadcast_dimensions": broadcast_dimensions, "err_msg": err_msg}
for inshape, outshape, broadcast_dimensions, err_msg in [
([2], [2, 2], [0, 1], ('broadcast_dimensions must have length equal to '
'operand ndim')),
([2, 2], [2], [0, 1], ('target broadcast shape must have equal or higher rank '
'to the operand shape')),
([2], [2, 3], [2], ('broadcast_in_dim broadcast_dimensions must be a subset of output '
'dimensions')),
([2], [3], [0], ('operand dimension sizes must either be 1, or be '
'equal to their corresponding dimensions in the target broadcast shape')),
([2, 2], [2, 2], [1, 0], ('broadcast_dimensions must be strictly increasing')),
]))
def testBroadcastInDimShapeCheck(self, inshape, outshape, broadcast_dimensions, err_msg):
rng = jtu.rand_default(self.rng())
x = rng(inshape, np.float32)
with self.assertRaisesRegex(TypeError, err_msg):
lax.broadcast_in_dim(x, shape=outshape, broadcast_dimensions=broadcast_dimensions)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
([1], [2, 3], [1]),
]
for dtype in default_dtypes))
def testBroadcastInDimAgainstNumpy(self, inshape, dtype, outshape, dimensions):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(inshape, dtype)]
op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
numpy_op = lambda x: lax_reference.broadcast_in_dim(x, outshape, dimensions)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_dimensions={}".format(
jtu.format_shape_dtype_string(inshape, np.float32), dimensions),
"inshape": inshape, "dimensions": dimensions, "error_type": error_type,
"err_msg": err_msg}
for inshape, dimensions, error_type, err_msg in [
((1, 2, 3), (0, 0), ValueError, 'dimensions are not unique'),
((1, 2, 3), (3,), ValueError, 'axis 3 is out of bounds'),
((1, 2, 3), (-4,), ValueError, 'axis -4 is out of bounds'),
((1, 2, 3), (1,), ValueError, 'cannot select an axis to squeeze out'),
((1, 2, 3), (None,), TypeError, 'cannot be interpreted as an integer'),
]))
def testSqueezeShapeCheck(self, inshape, dimensions, error_type, err_msg):
rng = jtu.rand_default(self.rng())
x = rng(inshape, np.float32)
with self.assertRaisesRegex(error_type, err_msg):
lax.squeeze(x, dimensions=dimensions)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_dimensions={}".format(
jtu.format_shape_dtype_string(arg_shape, np.float32), dimensions),
"arg_shape": arg_shape, "dimensions": dimensions}
for arg_shape, dimensions in [
[(1,), (0,)],
[(1,), (-1,)],
[(2, 1, 4), (1,)],
[(2, 1, 3, 1), (1,)],
[(2, 1, 3, 1), (1, 3)],
[(2, 1, 3, 1), (3,)],
]))
def testSqueeze(self, arg_shape, dimensions):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(arg_shape, np.float32)]
op = lambda x: lax.squeeze(x, dimensions)
numpy_op = lambda x: lax_reference.squeeze(x, dimensions)
self._CompileAndCheck(op, args_maker)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
check_grads(op, args_maker(), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in [
[(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
]))
def testReshape(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(arg_shape, dtype)]
op = lambda x: lax.reshape(x, out_shape)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
for dtype in default_dtypes
for arg_shape, out_shape in [
[(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
]))
def testReshapeAgainstNumpy(self, arg_shape, out_shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(arg_shape, dtype)]
op = lambda x: lax.reshape(x, out_shape)
numpy_op = lambda x: lax_reference.reshape(x, out_shape)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testRoundRoundingMethods(self):
x = np.array([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5], dtype=np.float32)
self.assertAllClose(lax.round(x, lax.RoundingMethod.AWAY_FROM_ZERO),
np.array([-3, -2, -1, 1, 2, 3], dtype=np.float32))
self.assertAllClose(lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN),
np.array([-2, -2, 0, 0, 2, 2], dtype=np.float32))
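# Halfway cases are where the two methods differ: AWAY_FROM_ZERO maps +-0.5 to
# +-1 and +-2.5 to +-3, while TO_NEAREST_EVEN (banker's rounding) maps +-0.5
# to 0 and +-2.5 to +-2, always picking the even neighbor.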
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads),
"shape": shape, "dtype": dtype, "pads": pads}
for dtype in default_dtypes
for shape, pads in [
((0, 2), [(1, 2, 1), (0, 1, 0)]),
((2, 3), [(1, 2, 1), (0, 1, 0)]),
((2,), [(1, 2, 0)]),
((1, 2), [(1, 2, 0), (3, 4, 0)]),
((1, 2), [(0, 0, 0), (0, 0, 0)]),
((2,), [(1, 2, 3),]),
((3, 2), [(1, 2, 1), (3, 4, 2)]),
((2,), [(-1, 2, 0),]),
((4, 2), [(-1, -2, 0), (1, 2, 0)]),
((4, 2), [(-1, 2, 0), (1, 2, 2)]),
((5,), [(-1, -2, 2),]),
((4, 2), [(-1, -2, 1), (1, 2, 2)])
]))
def testPad(self, shape, dtype, pads):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(shape, dtype)]
fun = lambda operand: lax.pad(operand, np.array(0, dtype), pads)
self._CompileAndCheck(fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads),
"shape": shape, "dtype": dtype, "pads": pads}
for shape in [(2, 3)]
for dtype in default_dtypes
for pads in [
[(0, 0, 0), (0, 0, 0)], # no padding
[(1, 1, 0), (2, 2, 0)], # only positive edge padding
[(1, 2, 1), (0, 1, 0)], # edge padding and interior padding
[(0, 0, 0), (-1, -1, 0)], # negative padding
[(0, 0, 0), (-2, -2, 4)], # add big dilation then remove from edges
[(0, 0, 0), (-2, -3, 1)], # remove everything in one dimension
]))
def testPadAgainstNumpy(self, shape, dtype, pads):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.pad(x, np.array(0, dtype), pads)
numpy_op = lambda x: lax_reference.pad(x, np.array(0, dtype), pads)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testPadErrors(self):
with self.assertRaisesRegex(ValueError, "padding_config"):
lax.pad(np.zeros(2), 0., [(0, 1, 0), (0, 1, 0)])
with self.assertRaisesRegex(ValueError, "interior padding in padding_config must be nonnegative"):
lax.pad(np.zeros(2), 0., [(0, 1, -1)])
with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
lax.pad(np.zeros(2), 0., [(-3, 0, 0)])
with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
lax.pad(np.zeros(2), 0., [(-4, 0, 1)])
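# A small sketch of negative (trimming) padding semantics, assuming the usual
# lax.pad behavior:
#
#   lax.pad(np.arange(5.), 0., [(-1, -2, 0)])  # -> [1., 2.]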
def testReverse(self):
rev = jax.jit(lambda operand: lax.rev(operand, dimensions))
dimensions = []
self.assertAllClose(np.array([0, 1, 2, 3]), rev(np.array([0, 1, 2, 3])),
check_dtypes=False)
dimensions = [0]
self.assertAllClose(np.array([3, 2, 1]), rev(np.array([1, 2, 3])),
check_dtypes=False)
dimensions = [0, 1]
self.assertAllClose(np.array([[6, 5, 4], [3, 2, 1]]),
rev(np.array([[1, 2, 3], [4, 5, 6]])),
check_dtypes=False)
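# Note: `dimensions` is a closure variable mutated between calls; this works
# because each call passes an argument of a new shape, forcing jit to retrace
# and pick up the current value of `dimensions`.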
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}".format(
jtu.format_shape_dtype_string(pred_shape, np.bool_),
jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
"pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for arg_dtype in default_dtypes))
def testSelect(self, pred_shape, arg_shape, arg_dtype):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),
rng(arg_shape, arg_dtype)]
return self._CompileAndCheck(lax.select, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}".format(
jtu.format_shape_dtype_string(pred_shape, np.bool_),
jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
"pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for arg_dtype in default_dtypes))
def testSelectAgainstNumpy(self, pred_shape, arg_shape, arg_dtype):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),
rng(arg_shape, arg_dtype)]
return self._CheckAgainstNumpy(lax_reference.select, lax.select, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_indices={}_limit_indices={}_strides={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, limit_indices, strides),
"shape": shape, "dtype": dtype, "starts": indices,
"limits": limit_indices, "strides": strides}
for shape, indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for dtype in default_dtypes))
def testSlice(self, shape, dtype, starts, limits, strides):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.slice(x, starts, limits, strides)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_indices={}_limit_indices={}_strides={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, limit_indices, strides),
"shape": shape, "dtype": dtype, "starts": indices,
"limits": limit_indices, "strides": strides}
for shape, indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for dtype in default_dtypes))
def testSliceAgainstNumpy(self, shape, dtype, starts, limits, strides):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.slice(x, starts, limits, strides)
numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_indices={}_size_indices={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, size_indices),
"shape": shape, "dtype": dtype, "indices": indices,
"size_indices": size_indices}
for shape, indices, size_indices in [
[(3,), np.array((1,)), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), np.array((1, 1)), (3, 1)],
[(7, 5, 3), np.array((4, 1, 0)), (2, 0, 1)],
]
for dtype in default_dtypes))
def testDynamicSlice(self, shape, dtype, indices, size_indices):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), np.array(indices)]
op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)
self._CompileAndCheck(op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_indices={}_size_indices={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, size_indices),
"shape": shape, "dtype": dtype, "indices": indices,
"size_indices": size_indices}
for shape, indices, size_indices in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes))
def testDynamicSliceAgainstNumpy(self, shape, dtype, indices, size_indices):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype), np.array(indices)]
op = lambda x, s: lax.dynamic_slice(x, s, size_indices)
numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testDynamicSliceInDim(self):
# Regression test for mixed type problem in dynamic_slice_in_dim.
rng = jtu.rand_default(self.rng())
x = rng((6, 7), np.int32)
np.testing.assert_equal(lax.dynamic_slice_in_dim(x, 2, 3), x[2:5])
def testDynamicSliceArraySliceSizes(self):
rng = jtu.rand_default(self.rng())
x = rng((6, 7), np.int32)
np.testing.assert_equal(lax.dynamic_slice(x, [2, 3], jnp.array([2, 2])),
x[2:4, 3:5])
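  def testDynamicSliceClampsOutOfBoundsStarts(self):
    # Hedged extra check (not part of the original suite): XLA's DynamicSlice
    # clamps start indices so the requested slice sizes always fit inside the
    # operand, unlike static lax.slice, for which this would be an error.
    x = jnp.arange(6)
    np.testing.assert_equal(lax.dynamic_slice(x, [5], [3]), np.arange(3, 6))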
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_indices={}_update_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, update_shape),
"shape": shape, "dtype": dtype, "indices": indices,
"update_shape": update_shape}
for shape, indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes))
def testDynamicUpdateSlice(self, shape, dtype, indices, update_shape):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]
self._CompileAndCheck(lax.dynamic_update_slice, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_indices={}_update_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype),
indices, update_shape),
"shape": shape, "dtype": dtype, "indices": indices,
"update_shape": update_shape}
for shape, indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes))
def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, indices,
update_shape):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]
self._CheckAgainstNumpy(lax_reference.dynamic_update_slice,
lax.dynamic_update_slice, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
"shape": shape, "dtype": dtype, "perm": perm}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for dtype in default_dtypes))
def testTranspose(self, shape, dtype, perm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.transpose(x, perm)
self._CompileAndCheck(op, args_maker)
def testTransposeWithArrayPermutation(self):
x = lax.transpose(np.ones((2, 3)), jnp.array([1, 0]))
self.assertEqual((3, 2), x.shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
"shape": shape, "dtype": dtype, "perm": perm}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for dtype in default_dtypes))
def testTransposeAgainstNumpy(self, shape, dtype, perm):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.transpose(x, perm)
numpy_op = lambda x: lax_reference.transpose(x, perm)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}"
.format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,
init_val),
"op": op, "init_val": init_val, "shape": shape, "dtype": dtype, "dims": dims}
for init_val, op, types in [
(0, lax.add, default_dtypes),
(1, lax.mul, default_dtypes),
(0, lax.max, all_dtypes), # non-monoidal
(-np.inf, lax.max, float_dtypes),
(dtypes.iinfo(np.int32).min, lax.max, [np.int32]),
(dtypes.iinfo(np.int64).min, lax.max, [np.int64]),
(np.inf, lax.min, float_dtypes),
(dtypes.iinfo(np.int32).max, lax.min, [np.int32]),
(dtypes.iinfo(np.int64).max, lax.min, [np.int64]),
(dtypes.iinfo(np.uint32).max, lax.min, [np.uint32]),
(dtypes.iinfo(np.uint64).max, lax.min, [np.uint64]),
]
for dtype in types
for shape, dims in [
[(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]
]))
def testReduce(self, op, init_val, shape, dtype, dims):
rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
else jtu.rand_small)
rng = rng_factory(self.rng())
init_val = np.asarray(init_val, dtype=dtype)
fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)
args_maker = lambda: [rng(shape, dtype), init_val]
self._CompileAndCheck(fun, args_maker)
# we separately test the version that uses a concrete init_val because it
# can hit different code paths
fun = lambda operand: lax.reduce(operand, init_val, op, dims)
args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}.{}_arr_weak_type={}_init_weak_type={}"
.format(op_namespace.__name__, op, arr_weak_type, init_weak_type),
"op": op, "op_namespace": op_namespace, "arr_weak_type": arr_weak_type, "init_weak_type": init_weak_type}
for op in ["add", "mul"]
for op_namespace in [lax, operator]
for arr_weak_type in [True, False]
for init_weak_type in [True, False]))
def testReduceWeakType(self, op_namespace, op, arr_weak_type, init_weak_type):
op = getattr(op_namespace, op)
    arr = lax._convert_element_type(np.arange(10), int, weak_type=arr_weak_type)
import controlSBML.constants as cn
import controlSBML as ctl
from controlSBML.option_management.option_manager import OptionManager
from controlSBML.option_management.options import Options
from docstring_expander.expander import Expander
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import urllib.request
REPO_URL = "https://github.com/ModelEngineering/controlSBML/raw/main"
MODULE_URLPAT = REPO_URL + "/controlSBML/%s.py" # Pattern for module URL
MODEL_URLPAT = REPO_URL + "/models/%s"
MODEL_823_FILE = "biomodels_823.ant"
LOCAL_FILE = "local.txt"
def calculateMatrixDistance(mat1, mat2):
"""
Calculates the distance between two matrices with the same shape.
Parameters
----------
mat1 - np.array
mat2 - np.array
Returns
-------
float
"""
    if np.shape(mat1) != np.shape(mat2):
        raise ValueError("Matrices must have the same shape.")
    return np.sum((mat1 - mat2)**2)
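# Illustrative check (not from the source): identical matrices give zero distance.
# calculateMatrixDistance(np.eye(2), np.eye(2))  # -> 0.0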
"""Write a python module (e.g. function) that returns movies randomly"""
import numpy as np
from sklearn.decomposition import NMF
import pandas as pd
import random
import pickle
MOVIES = [x[:-7] for x in (pd.read_csv('movies.csv'))['title'].values.tolist()]
#MOVIES = ['Shawshank Redemption',
# 'Wizard of Oz',
# 'Pulp Fiction',
# 'Kill Bill',
# 'Rings, The Lord of (2002)',
# 'RegEx: The Movie',
# 'FuzzyWuzzy Bubbly Buddy',
# 'Docker: Unleashed',
# 'Docker: FML',
# 'Flask Fun',
# 'Django Girls Unchained']
# Add user_input as a parameter to this function and pass the user's input in.
# Transform the user input so that it is a list whose length equals the number of columns.
# Replace the columns that represent the user's ratings with the values the user has given.
# Feed that resulting list to the transform step (P = model.transform(user_input2)).
# nR is going to be the new recommendation list.
# Find the top values in nR (np.argsort or np.argmax).
# Find the index (or movieId) of those columns.
# Find the names for those movieIds (iloc or pandas.columns).
def random_recommend(user_input_movies, user_input_ratings):
ratings = pd.read_csv('ratings.csv')
movies = pd.read_csv('movies.csv')
moviesIdlist=movies['movieId'].tolist()
MOVIES = [x[:-7] for x in (pd.read_csv('movies.csv'))['title'].values.tolist()]
MOVIEID = [x for x in (pd.read_csv('movies.csv'))['movieId'].values.tolist()]
movies['title'] = MOVIES
ratings2=ratings.set_index(['userId'])
ratings3 = ratings.drop(['timestamp'], axis=1)
newmoviesratings = pd.merge(ratings3, movies, on='movieId')
newmoviesratings2=newmoviesratings[['userId','title','rating']]
# newf = newmoviesratings2.set_index('userId')
newf = ratings3.pivot(index='userId', columns='movieId')
actualmovieIdslist=ratings3['movieId'].tolist()
newmoviesIDlist = [x for x in moviesIdlist if x in actualmovieIdslist]
newf2=newf.fillna(3.000000)
R=newf2.values
model = pickle.load(open('finalized_model.sav', 'rb'))
Q = model.components_
P = model.transform(R)
    nR = np.dot(P, Q)
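    # Hedged sketch (the source snippet ends here): the recipe above would
    # continue roughly like this, ranking the reconstructed ratings for one
    # user and mapping the best columns back to titles. `user_row` is a
    # hypothetical index, not part of the original code.
    # user_row = 0
    # top_cols = np.argsort(nR[user_row])[::-1][:3]
    # return [movies.loc[movies['movieId'] == newf2.columns[c][1], 'title'].iloc[0]
    #         for c in top_cols]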
# -*- coding: utf-8 -*-
"""
Created on 2020/8/12
@project: SPAIC
@filename: Monitor
@author: <NAME>
@contact: <EMAIL>
@description:
定义神经集群放电以及神经元状态量、连接状态量的仿真记录模块
"""
from ..Network.Assembly import BaseModule, Assembly
from ..Network.Connection import Connection
from ..Backend.Backend import Backend
import numpy as np
import torch
class Monitor(BaseModule):
def __init__(self, target, var_name, index='full', dt=None, get_grad=False, nbatch=True):
super().__init__()
if isinstance(target, Assembly):
self.target = target
self.target_type = 'Assembly'
elif isinstance(target, Connection):
self.target = target
self.target_type = 'Connection'
elif target == None:
self.target = None
self.target_type = None
else:
raise ValueError("The target does not belong to types that can be watched (Assembly, Connection).")
self.var_name = '{'+var_name+'}'
self.index = index
self.var_container = None
self.get_grad = get_grad
self.nbatch = nbatch
self._nbatch_records = [] # all time window's record
self._nbatch_times = []
self._records = [] # single time window's record
self._times = []
self.dt = dt
self.is_recording = True
self.new_record = True
def check_var_name(self, var_name):
'''
        Check if the variable is in the target model, and add the target id label to the variable name.
        Parameters
        ----------
        var_name : original variable name
        Returns
        -------
        modified variable name
'''
tar_var_name = None
if var_name[1:-1] in self.backend._variables.keys():
tar_var_name = var_name[1:-1]
else:
            for tar_name in self.target.get_var_names():  # no intermediate variables
if var_name in tar_name:
tar_var_name = tar_name
break
if tar_var_name is not None:
return tar_var_name
else:
raise ValueError(" Variable %s is not in the target model"%var_name)
def get_str(self, level):
pass
def monitor_on(self):
self.is_recording = True
def monitor_off(self):
self.is_recording = False
def clear(self):
NotImplementedError()
def build(self, backend: Backend):
NotImplementedError()
def init_record(self):
NotImplementedError()
def update_step(self):
NotImplementedError()
def push_data(self, data, time):
"push data to monitor by backend"
self._records.append(data)
self._times.append(time)
class SpikeMonitor(Monitor):
def __init__(self, target, var_name='O', index='full', dt=None, get_grad=False, nbatch=False):
super().__init__(target=target, var_name=var_name, index=index, dt=dt, get_grad=get_grad, nbatch=nbatch)
self._transform_len = 0
self._nbatch_index = [] # all time window's record
self._nbatch_times = []
self._spk_index = []
self._spk_times = []
self._records = [] # single time window's record
self._times = []
def build(self, backend: Backend):
self.backend = backend
self.backend._monitors.append(self)
self.var_name = self.check_var_name(self.var_name)
self.shape = self.backend._variables[self.var_name].shape
if self.dt is None:
self.dt = self.backend.dt
def clear(self):
self._transform_len = -1
self._nbatch_index = [] # all time window's record
self._nbatch_times = []
self._spk_index = []
self._spk_times = []
self._records = [] # single time window's record
self._times = []
def init_record(self):
self.new_record = True
if len(self._spk_index) > 0:
if self.nbatch is True:
if isinstance(self._spk_index[0], torch.Tensor):
self._nbatch_index.append(torch.stack(self._spk_index[1:], dim=-1).cpu().detach().numpy())
else:
self._nbatch_index.append(np.stack(self._spk_index[1:], axis=-1))
self._nbatch_times.append(self._times[1:])
elif self.nbatch > 0:
if isinstance(self._spk_index[0], torch.Tensor):
self._nbatch_index.append(torch.stack(self._spk_index[1:], dim=-1).cpu().detach().numpy())
else:
self._nbatch_index.append(np.stack(self._spk_index[1:], axis=-1))
self._nbatch_times.append(self._times[1:])
if len(self._nbatch_times) > self.nbatch:
self._nbatch_index = self._nbatch_index[-self.nbatch:]
self._nbatch_times = self._nbatch_times[-self.nbatch:]
self._records = [] # single time window's record
self._times = []
self._transform_len = -1
def push_spike_train(self, spk_times, spk_index, batch_index=0):
if len(self._spk_index) < batch_index+1:
add_num = batch_index + 1 - len(self._spk_index)
for _ in range(add_num):
self._spk_index.append([])
self._spk_times.append([])
if isinstance(spk_times, list) or isinstance(spk_times, tuple):
self._spk_times[batch_index].extend(spk_times)
self._spk_index[batch_index].extend(spk_index)
else:
self._spk_times[batch_index].append(spk_times)
self._spk_index[batch_index].append(spk_index)
        # bypass _spike_transform when spk_times / spk_index are read, since the trains were pushed in directly
self._transform_len = 1
def update_step(self, variables):
'''
Recoding the variable values of the current step.
Returns
-------
'''
if self.is_recording is False:
return
if int(10000 * self.backend.time / self.dt) % 10000 == 0:
record_value = variables[self.var_name]
if self.get_grad:
variables[self.var_name].retain_grad()
if self.index == 'full':
self._records.append(record_value)
self._times.append(self.backend.time)
else:
if len(self.index) == record_value.ndim:
self._records.append(record_value[self.index])
self._times.append(self.backend.time)
else:
assert len(self.index) == record_value.ndim -1
if self.backend.backend_name == 'pytorch':
record_value = torch.movedim(record_value, 0, -1)
indexed_value = record_value[tuple(self.index)]
indexed_value = torch.movedim(indexed_value, -1, 0)
else:
record_value = np.array(record_value)
record_value = np.moveaxis(record_value, 0, -1)
indexed_value = record_value[tuple(self.index)]
indexed_value = np.moveaxis(indexed_value, -1, 0)
self._records.append(indexed_value)
self._times.append(self.backend.time)
def _spike_transform(self):
batch_size = self.backend.get_batch_size()
if len(self._records) > self._transform_len:
self._transform_len = len(self._records)
self._spk_index = []
self._spk_times = []
if isinstance(self._records[0], torch.Tensor):
step = len(self._records)
rec_spikes = torch.stack(self._records, dim=-1).cpu().detach()
if '{[2]' in self.var_name:
for ii in range(batch_size):
rec_spikes_i = rec_spikes[ii,0,...].bool().reshape(-1)
rec_spikes_t = rec_spikes[ii,1,...].reshape(-1)
num = int(rec_spikes_i.size(0)/step)
time_seq = torch.tensor(self._times).unsqueeze(dim=0).expand(num, -1).reshape(-1)
indx_seq = torch.arange(0, num).unsqueeze(dim=1).expand(-1, step).reshape(-1)
time_seq = (torch.masked_select(time_seq - rec_spikes_t, rec_spikes_i) ).numpy()
indx_seq = torch.masked_select(indx_seq, rec_spikes_i).numpy()
self._spk_index.append(indx_seq)
self._spk_times.append(time_seq)
else:
for ii in range(batch_size):
rec_spikes_i = rec_spikes[ii,...].bool().reshape(-1)
num = int(rec_spikes_i.size(0)/step)
time_seq = torch.tensor(self._times).unsqueeze(dim=0).expand(num, -1).reshape(-1)
indx_seq = torch.arange(0, num).unsqueeze(dim=1).expand(-1, step).reshape(-1)
time_seq = torch.masked_select(time_seq, rec_spikes_i).numpy()
indx_seq = torch.masked_select(indx_seq, rec_spikes_i).numpy()
self._spk_index.append(indx_seq)
self._spk_times.append(time_seq)
@property
def spk_times(self):
self._spike_transform()
return self._spk_times
@property
def spk_index(self):
self._spike_transform()
return self._spk_index
@property
def spk_grad(self):
pass
return None
@property
def time_spk_rate(self):
if isinstance(self._records[0], torch.Tensor):
if '{[2]' in self.var_name:
spike = torch.stack(self._records, dim=-1).cpu().detach()[:,0,...]
else:
spike = torch.stack(self._records, dim=-1).cpu().detach()
return torch.mean(spike, dim=0).numpy()
else:
if '{[2]' in self.var_name:
spike = np.stack(self._records, axis=-1)[:,0,...]
else:
spike = np.stack(self._records, axis=-1)
return np.mean(spike, axis=0).numpy()
@property
def time(self):
return np.stack(self._times, axis=-1)
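# Usage sketch (the network calls below are assumptions, for illustration only):
#   mon = SpikeMonitor(target_group, var_name='O')
#   net.add_monitor('spikes', mon)   # hypothetical Network method
#   net.run(100.0)                   # hypothetical run call
#   spike_times, neuron_ids = mon.spk_times[0], mon.spk_index[0]  # batch 0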
class StateMonitor(Monitor):
def __init__(self, target, var_name, index='full', dt=None, get_grad=False, nbatch=False):
        # TODO: initialization is a bit cumbersome; the recorded variable must be known up front. Consider a more direct monitoring function.
super().__init__(target=target, var_name=var_name, index=index, dt=dt, get_grad=get_grad, nbatch=nbatch)
self._nbatch_records = [] # all time window's record
self._nbatch_times = []
self._records = [] # single time window's record
self._times = []
def build(self, backend: Backend):
self.backend = backend
self.backend._monitors.append(self)
self.var_name = self.check_var_name(self.var_name)
if self.index != 'full':
self.index = tuple(self.index)
if self.dt is None:
self.dt = self.backend.dt
def clear(self):
self._nbatch_records = [] # all time window's record
self._nbatch_times = []
self._records = [] # single time window's record
self._times = []
def init_record(self):
'''
        Initialize the record for a new trial.
Returns:
'''
self.new_record = True
if len(self._records) > 0:
if self.nbatch is True:
if isinstance(self._records[0], torch.Tensor):
self._nbatch_records.append(torch.stack(self._records, dim=-1).cpu().detach().numpy())
else:
self._nbatch_records.append(np.stack(self._records, axis=-1))
self._nbatch_times.append(self._times)
elif self.nbatch > 0:
if isinstance(self._records[0], torch.Tensor):
self._nbatch_records.append(torch.stack(self._records, dim=-1).cpu().detach().numpy())
else:
                    self._nbatch_records.append(np.stack(self._records, axis=-1))
                self._nbatch_times.append(self._times)
                if len(self._nbatch_times) > self.nbatch:
                    self._nbatch_records = self._nbatch_records[-self.nbatch:]
                    self._nbatch_times = self._nbatch_times[-self.nbatch:]
        self._records = []  # single time window's record
        self._times = []
import numpy as np
from scipy.optimize import curve_fit
from scipy.optimize import fsolve, brentq
from scipy.interpolate import interp1d
import scipy.integrate
import sys
import os
import writeproperties.velociraptor_python_tools as vpt
from scipy.spatial import cKDTree
import h5py
import re
from constants import *
from snapshot import *
import copy
import itertools
def getHaloCoord(catalog, halo, z=0): #Mpc/h
coords = np.zeros(3)
coords[0] = (catalog['Xcmbp'][halo]+catalog['Xc'][halo])*h*(1+z)
coords[1] = (catalog['Ycmbp'][halo]+catalog['Yc'][halo])*h*(1+z)
coords[2] = (catalog['Zcmbp'][halo]+catalog['Zc'][halo])*h*(1+z)
return coords
def getHaloRadius(catalog, halo, z=0): #Mpc/h
return catalog['R_200crit'][halo]*h*(1+z)
def getHaloCoordCOM(catalog, halo, z=0): #Mpc/h
coords = np.zeros(3)
coords[0] = catalog['Xc'][halo]*h*(1+z)
coords[1] = catalog['Yc'][halo]*h*(1+z)
coords[2] = catalog['Zc'][halo]*h*(1+z)
return coords
def readHaloFile(halofile):
atime,tree,numhalos,halodata,cosmodata,unitdata = vpt.ReadUnifiedTreeandHaloCatalog(halofile, desiredfields=[], icombinedfile=1,iverbose=0)
return atime,tree,numhalos,halodata,cosmodata,unitdata
def findSurroundingHaloProperties(hp, halolist, d_snap, boxsize=32.):
coords = hp['Coord']
halotree = cKDTree(coords, boxsize=boxsize)
for k in halolist:
if hp['R200'][k] == -1:
continue
halostring = hp['HaloIndex'][k]
length_of_neighbours = len(np.array(halotree.query_ball_point([hp['Coord'][k]], r=hp['R200'][k]*5)[0]))
distance, indices = halotree.query([hp['Coord'][k]], k=length_of_neighbours)
indices = np.array(indices[0])[1:]
distance = np.array(distance[0])[1:]
hp['Neighbours'][halostring] = hp['HaloIndex'][indices]
hp['Neighbour_distance'][halostring] = distance
hp['Neighbour_Velrad'][halostring] = np.zeros(len(distance))
j=0
for i in indices:
partindices = hp['Partindices'][hp['HaloIndex'][i]]
hp['Neighbour_Velrad'][halostring][j] = np.sum(d_snap['File'].get_radialvelocity(hp['Coord'][k], indices=partindices))/len(partindices)
j+=1
def fixSatelliteProblems(hp, Hydro=False, TEMPORALHALOIDVAL=1000000000000):
halotree = cKDTree(hp['Coord'], boxsize=32)
toolarge = np.where(hp['R200'] > hp['R200'][0]*1.2)[0]
#print(i, toolarge)
if len(toolarge) != 0:
for tl in toolarge:
hp['M200'][tl] = -1
hp['R200'][tl] = -1
if Hydro:
hp['DMFraction'][tl] = -1
hp['hostHaloIndex'][hp['HaloIndex'][tl]==hp['hostHaloIndex']] = -2
for halo in range(len(hp['M200'])):
if hp['M200'][halo] == -1:
continue
buren = np.array(halotree.query_ball_point(hp['Coord'][halo], r = 2*hp['R200'][halo]))
if len(buren) <= 1:
continue
buren = buren[hp['R200'][buren] != -1]
i_largest = np.argmax(hp['n_part'][buren])
index_largest = buren[i_largest]
buren = np.delete(buren,i_largest)
coords = hp['Coord'][buren] - hp['Coord'][index_largest]
coords = np.where(np.abs(coords) > 0.5*32, coords - coords/np.abs(coords)*32, coords)
rad = np.sqrt(np.sum(coords*coords, axis=1))
burentemp = np.where(hp['R200'][buren]-rad+hp['R200'][index_largest] > 0)[0]
if len(burentemp) == 0:
continue
buren = buren[burentemp]
hp['hostHaloIndex'][buren] = index_largest
hp['M200'][buren] = -1
hp['R200'][buren] = -1
if Hydro:
hp['DMFraction'][buren] = -1
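# Note on fixSatelliteProblems: the coordinate fold above implements the
# minimum-image convention for the periodic 32 Mpc/h box, so neighbour
# distances are measured through the nearest boundary image rather than
# across the full box.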
def findSubHaloFraction(hp, catalog):
if len(hp['hostHaloIndex']) < 10:
hp['Msub'] = np.zeros(len(hp['M200']))
return 0
i_hostH = np.where(hp['hostHaloIndex'] > -1)[0]
hp['Msub'] = np.zeros(len(hp['M200']))
for i in i_hostH:
isattemp = np.where(hp['HaloID'][i] == catalog['ID'])[0]
hp['Msub'][hp['hostHaloIndex'][i]] += catalog['Mass_FOF'][isattemp]
def buildHaloDictionary(Hydro=False, multiple=False, mainBranch=False):
haloproperties = {}
if Hydro:
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot',
'lambda', 'lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'Npart',
'DMpartIDs', 'Partindices', 'HpartIDs', 'NpartDM_profile', 'Npart_profile', 'DMFraction', 'DMFraction_profile', 'MassH_profile',
'VelradDM', 'Velrad', 'VelradH', 'Vel', 'Temperature', 'Mass_profile', 'MassDM_profile', 'COM_offset',
'AngularMomentumDM', 'AngularMomentumH', 'AngularMomentum', 'Radius', 'MaxRadIndex', 'hostHaloIndex', 'n_part', 'Msub', 'CrossTime',
'Virial_ratio'])
else:
haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'DMpartIDs', 'lambda', 'Density', 'Npart',
'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'MassDM_profile', 'Partindices', 'n_part', 'MaxRadIndex',
'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex'])
if mainBranch:
haloarray.append('Head')
haloarray.append('Tail')
haloarray.append('TreeBool')
for key in haloarray:
if multiple and (key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices' or key=='Neighbours' or key=='Neighbour_distance' or key=='Neighbour_Velrad'):
haloproperties[key] = {}
else:
haloproperties[key] = np.zeros(0)
return haloproperties
def findHaloPropertiesInSnap(catalog, snappath, snapshot, particledata=[], Hydro=False, Nhalo=100,
startHalo=0, softeningLength=0.002, r200fac=1., mass=False, partlim=200, savePartData=False):
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot))
haloproperties = buildHaloDictionary(Hydro = Hydro, multiple=True)
if len(catalog['Mass_tot']) == 0:
return haloproperties
sortorder = np.argsort(catalog['Mass_tot'][:])
d_snap = {}
d_snap['snapshot'] = snapshot
limiet = 0
if Hydro:
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=7, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
else:
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=1, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
for key in catalog.keys():
catalog[key][:] = catalog[key][sortorder]
catalog[key][:] = catalog[key][::-1]
if len(particledata) > 0:
mass=True
for key in particledata.keys():
particledata[key][:] = np.array(particledata[key])[sortorder]
particledata[key][:] = particledata[key][::-1]
for halo in range(startHalo, startHalo+Nhalo):
masshier = False
if mass:
masshier=catalog['Mass_200crit'][halo]*h
if masshier <= 0.000001:
startHalo += 1
limiet += 1
continue
if halo > len(catalog['Xc'])-1 or limiet > 500:
print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet))
print("Coordinates: ", coords)
break
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
if coords[0] < 0 or coords[1] < 0 or coords[2] < 0 or coords[0]%32 < 0.5 or coords[1]%32 < 0.5 or coords[2]%32 < 0.5:
startHalo += 1
continue
coords = coords%32
radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift)
if ((coords[0] < 2.*radhier) or (coords[1] < 2.*radhier) or (coords[2] < 2.*radhier)
or (np.abs(coords[0]-32) < 2.*radhier) or (np.abs(coords[1]-32) < 2.*radhier)
or (np.abs(coords[2]-32) < 2.*radhier)):
startHalo += 1
continue
if (catalog['hostHaloID'][halo] != -1) or (catalog['npart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0):# or ((catalog['Mass_200crit'][halo]/4.e3)**(1./3) < catalog['R_200crit'][halo]):
startHalo += 1
continue
halopropertiestemp = findHaloProperties(halo, catalog, d_snap, particledata=particledata, Hydro=Hydro, r200fac=r200fac, mass=masshier, partlim=partlim)
if len(halopropertiestemp) == 0:
startHalo += 1
limiet += 1
continue
if halopropertiestemp['Npart'] < partlim:
startHalo += 1
limiet += 1
continue
limiet = 0
for key in haloproperties.keys():
if key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
                pass  # the neighbour bookkeeping keys are skipped here
elif key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices':
if key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif savePartData:
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif halo == startHalo:
haloproperties[key] = [halopropertiestemp[key]]
else:
haloproperties[key] = np.concatenate((haloproperties[key], [halopropertiestemp[key]]))
#if startHalo + Nhalo >= len(catalog['npart']) and len(haloproperties['Npart']) > partlim:
print("Compute temperatures outside haloes...")
everythingOutside(haloproperties, d_snap)
print("finding surrounding haloes...")
if len(haloproperties['M200']) > 100:
findSurroundingHaloProperties(haloproperties, np.arange(0, 100, 1).astype(int), d_snap)
return haloproperties
def findHaloPropertiesInSnap_fromUnifiedTreeCatalog(catalog, snappath, snapshot, Hydro=False, Nhalo=100,
startHalo=0, softeningLength=0.002, Radius=1., partlim=200, savePartData=False, sortorder=[],
boxsize=32, TEMPORALHALOIDVAL=1000000000000):
print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot))
haloproperties = buildHaloDictionary(Hydro = Hydro, multiple=True, mainBranch=True)
if len(catalog['Mass_tot']) == 0:
return haloproperties
if len(sortorder)==0:
sortorder = np.argsort(catalog['Mass_tot'][:])[::-1]
sortorderinvert = np.argsort(sortorder)
else:
sortorderinvert = np.argsort(sortorder)
d_snap = {}
d_snap['snapshot'] = snapshot
limiet = 0
if Hydro:
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=7, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
else:
d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=1, softeningLength=softeningLength)
d_snap['File'].makeCoordTree()
for key in catalog.keys():
catalog[key][:] = catalog[key][sortorder]
#catalog[key][:] = catalog[key][::-1]
#haloproperties['TreeBool'] = np.ones(len(tails), dtype=int)
for halo in range(startHalo, startHalo+Nhalo):
#start_time = time.time()
#print(halo)
#print(catalog['npart'][halo])
if halo%1000==0:
print('Computing properties for halo %i-%i' %(halo, halo+1000))
if halo > len(catalog['Xc'])-1:
print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet))
print("Coordinates: ", coords)
break
if limiet > 500: #Only computing sats
if catalog['hostHaloID'][halo] == -1:
# haloproperties['TreeBool'][halo] = 0
continue
halopropertiestemp = {}
coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift)
coords = coords%32
radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift)
satellite = False
if (catalog['npart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0):
startHalo += 1
# haloproperties['TreeBool'][halo] = 0
continue
if (catalog['hostHaloID'][halo] != -1) and len(haloproperties['HaloID'])>1:
haloindextemp = np.where((haloproperties['HaloID']%TEMPORALHALOIDVAL)==catalog['hostHaloID'][halo]%TEMPORALHALOIDVAL)[0]
if len(haloindextemp) == 0:
hostHaloIDtemp = -1
if catalog['npart'][halo] < 50:
hostHaloIDtemp = -2
satellite = True
else:
afstandtemp = (haloproperties['Coord'][haloindextemp[0]]-coords)
afstandtemp = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
afstandtemp = (np.sum(afstandtemp*afstandtemp))**0.5
if afstandtemp < haloproperties['R200'][haloindextemp[0]]: # and catalog['npart'][halo] > 50:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = haloindextemp[0]
satellite = True
else:
#print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords)
hostHaloIDtemp = -1
else:
hostHaloIDtemp = -1
#All happens here
halopropertiestemp = findHaloPropertiesFixedRadius(d_snap, halo, coords, np.logspace(-3, 0, 60)*Radius,
Hydro=Hydro, rad=radhier, mass=False, satellite=satellite, mainBranch=True)
#print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed')
if len(halopropertiestemp) == 0:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
if satellite == False and halopropertiestemp['Npart'] < partlim:
startHalo += 1
limiet += 1
# haloproperties['TreeBool'][halo] = 0
continue
limiet = 0
if satellite:
halopropertiestemp['Npart'] = catalog['npart'][halo]
#start_time = time.time()
halopropertiestemp['n_part'] = catalog['npart'][halo]
halopropertiestemp['HaloID'] = catalog['ID'][halo]
halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp
if not satellite:
afstandtemp = coords - getHaloCoordCOM(catalog, halo, z=d_snap['File'].redshift)
rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp)
halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200']
halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km /
np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/halopropertiestemp['R200']))*s_to_yr/1.e6
else:
halopropertiestemp['COM_offset'] = -1
halopropertiestemp['CrossTime'] = -1
for key in haloproperties.keys():
if key == 'TreeBool' or key == 'Tail' or key == 'Head' or key == 'Radius':
continue
elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad':
continue
elif key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices':
if key=='Partindices':
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif savePartData:
haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:]
elif halo == startHalo:
haloproperties[key] = [halopropertiestemp[key]]
else:
#print(key)
haloproperties[key] = np.concatenate((haloproperties[key], [halopropertiestemp[key]]))
#print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated')
# print("Correcting hostHaloIndices...")
# for i in range(len(haloproperties['HaloID'])):
# if (haloproperties['hostHaloIndex'][i] != -1):
# haloindextemp = np.where((haloproperties['HaloID']%TEMPORALHALOIDVAL)==haloproperties['hostHaloIndex'][i]%TEMPORALHALOIDVAL)[0]
# if len(haloindextemp) == 0:
# haloproperties['hostHaloIndex'][i] = -2
# else:
# haloproperties['hostHaloIndex'][i] = haloindextemp[0]
# else:
# haloproperties['hostHaloIndex'][i] = -1
# haloproperties['Tail'] = tails
# haloproperties['Head'] = heads
    haloproperties['Radius'] = np.logspace(-3, 0, 60)*Radius
    return haloproperties
#!/usr/bin/env python
'''
Filename: test_flags.py
Description: unit tests for flag_timepoints.py
'''
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'prototype'
# standard imports
import os
import sys
import unittest
import numpy as np
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(HERE +'/../../importing'))
from flags_and_breaks import *
class TestFlagTimepoints(unittest.TestCase):
def test_consolidate_all_true_flags(self):
all_flags = {'f1': [True, True, True],
'f2': [True, True, True],}
flags = consolidate_flags(all_flags)
soln = [True, True, True]
self.assertEqual(flags, soln)
def test_consolidate_all_False_flags(self):
all_flags = {'f1': [False, False],
'f2': [False, False],}
flags = consolidate_flags(all_flags)
soln = [False, False]
self.assertEqual(flags, soln)
def test_consolidate_mixed_flags(self):
all_flags = {'f1': [False, True, True, True],
'f2': [False, True, True, True],
'f2': [True, True, False, True]}
flags = consolidate_flags(all_flags)
soln = [False, True, False, True]
self.assertEqual(flags, soln)
def test_flag_no_outliers(self):
fail_num = 0
for i in range(10):
normal_dataset = np.random.normal(loc=5.0, scale=1.0, size=1000)
flags = flag_outliers(normal_dataset)
soln = [True for i in range(1000)]
if flags != soln:
fail_num += 1
self.assertTrue(fail_num <= 2)
    def test_flag_large_and_small_outliers(self):
N_normal, N_out1, N_out2 = 200, 50, 20
normal_dataset = np.random.normal(loc=10.0, scale=1.0, size=N_normal)
outliers1 = np.random.normal(loc=20.0, scale=1.0, size=N_out1)
outliers2 = np.random.normal(loc=1.0, scale=0.5, size=N_out2)
test_set = list(normal_dataset) + list(outliers1) + list(outliers2)
flags = flag_outliers(test_set)
soln = ([True] * N_normal) + ([False] * (N_out1 + N_out2))
self.assertEqual(flags, soln)
def test_flag_just_small_outliers(self):
N_normal, N_out1, N_out2 = 200, 10, 30
normal_dataset = np.random.normal(loc=10.0, scale=1.0, size=N_normal)
outliers1 = np.random.normal(loc=20.0, scale=1.0, size=N_out1)
outliers2 = np.random.normal(loc=1.0, scale=0.5, size=N_out2)
test_set = list(normal_dataset) + list(outliers1) + list(outliers2)
flags = flag_outliers(test_set, options='short')
soln = ([True] * (N_normal+N_out1)) + ([False] * N_out2)
self.assertEqual(flags, soln)
def test_flag_just_large_outliers(self):
N_normal, N_out1, N_out2 = 200, 10, 20
normal_dataset = np.random.normal(loc=10.0, scale=1.0, size=N_normal)
outliers1 = np.random.normal(loc=20.0, scale=1.0, size=N_out1)
outliers2 = np.random.normal(loc=1.0, scale=0.5, size=N_out2)
test_set = list(normal_dataset) + list(outliers1) + list(outliers2)
flags = flag_outliers(test_set, options='long')
soln = [True] * N_normal + [False] * N_out1 + [True] * N_out2
self.assertEqual(flags, soln)
def test_flag_outliers_with_nulls(self):
N_normal = 1000
        normal_dataset = np.random.normal(loc=10.0, scale=1.0, size=N_normal)
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
from scipy.optimize import curve_fit
from auspex.log import logger
from copy import copy
import matplotlib.pyplot as plt
from .fits import AuspexFit, Auspex2DFit
from .signal_analysis import KT_estimation
class RabiAmpFit(AuspexFit):
"""A fit to a Rabi amplitude curve, assuming a cosine model.
"""
xlabel = "Amplitude"
ylabel = r"<$\sigma_z$>"
title = "Rabi Amp Fit"
@staticmethod
def _model(x, *p):
return p[0] - p[1]*np.cos(2*np.pi*p[2]*(x - p[3]))
def _initial_guess(self):
#seed Rabi frequency from largest FFT component
N = len(self.ypts)
yfft = np.fft.fft(self.ypts)
f_max_ind = np.argmax(np.abs(yfft[1:N//2]))
f_0 = 0.5 * max([1, f_max_ind]) / self.xpts[-1]
amp_0 = 0.5*(self.ypts.max() - self.ypts.min())
offset_0 = np.mean(self.ypts)
phase_0 = 0
if self.ypts[N//2 - 1] > offset_0:
amp_0 = -amp_0
return [offset_0, amp_0, f_0, phase_0]
def _fit_dict(self, p):
return {"y0": p[0],
"Api": p[1],
"f": p[2],
"phi": p[3]}
def __str__(self):
return "y0 - Api*cos(2*pi*f*(t - phi))"
@property
def pi_amp(self):
"""Returns the pi-pulse amplitude of the fit.
"""
return 0.5/self.fit_params["f"]
def annotation(self):
return r"$A_\pi$ = {0:.2e} {1} {2:.2e}".format(self.pi_amp, chr(177), self.fit_errors["Api"])
class RabiWidthFit(AuspexFit):
"""Fit to a single-frequency decaying cosine for fitting Rabi-vs-time experiments
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Rabi Width Fit"
@staticmethod
def _model(x, *p):
return p[0] + p[1]*np.exp(-x/p[2])*np.cos(2*np.pi*p[3]*(x - p[4]))
def _initial_guess(self):
frabi, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
offset = np.average(self.xpts)
amp = np.max(self.ypts)
trabi = self.xpts[np.size(self.ypts) // 3]# assume Trabi is 1/3 of the scan
phase = 90.0
return [offset, amp, trabi, frabi[0], phase]
def _fit_dict(self, p):
return {"y0": p[0],
"A": p[1],
'T': p[2],
"f": p[3],
"phi": p[4]}
def __str__(self):
return "y0 + A*exp(-x/T)*cos(2*pi*f*(t - phi))"
@property
def t_rabi(self):
return self.fit_params["T"]
def annotation(self):
return r"$T_\pi$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T"], chr(177), self.fit_errors["T"])
class T1Fit(AuspexFit):
"""Fit to a decaying exponential for T1 measurement experiments.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = r"$T_1$ Fit"
@staticmethod
def _model(x, *p):
return p[0]*np.exp(-x/p[1]) + p[2]
def _initial_guess(self):
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)], [np.sum(xs * S), np.sum(S**2)]])
B1 = (np.linalg.inv(M) @ np.array([np.sum(ys * xs), np.sum(ys * S)]).T)[1]
theta = np.exp(B1 * self.xpts)
M2 = np.array([[N, np.sum(theta)], [np.sum(theta), np.sum(theta**2)]])
A = np.linalg.inv(M2) @ np.array([np.sum(self.ypts), np.sum(self.ypts * theta)]).T
return [A[1], -1.0/B1, A[0]]
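    # In _initial_guess above, the cumulative integral S linearizes
    # y = A*exp(-t/T1) + A0: the first solve recovers the exponent B1
    # (so T1 = -1/B1), and the second least-squares solve against
    # theta = exp(B1*t) recovers the offset A0 and the amplitude A.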
def _fit_dict(self, p):
return {"A": p[0], "T1": p[1], "A0": p[2]}
def __str__(self):
return "A0 + A*exp(-t/T1)"
@property
def T1(self):
"""Return the measured T1 (i.e. decay constant of exponential).
"""
return self.fit_params["T1"]
def make_plots(self):
"""Create plot on both linear and semilog scale
"""
logger.info("Semilog plot of |1> state probability requires calibrated data.")
plt.figure(figsize=(2*6.4, 4.8))
plt.subplot(121)
plt.plot(self.xpts, self.ypts, ".", markersize=15, label="Data")
plt.plot(self.xpts, self.model(self.xpts), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel(self.ylabel, fontsize=14)
plt.annotate(self.annotation(), xy=(0.4, 0.10), xycoords='axes fraction', size=12)
plt.subplot(122)
plt.semilogy(self.xpts, -1/2*(self.ypts - self.fit_params["A0"]), ".", markersize=15, label="Data")
plt.semilogy(self.xpts, -1/2*(self.model(self.xpts) - self.fit_params["A0"]), "-", linewidth=3, label="Fit")
plt.xlabel(self.xlabel, fontsize=14)
plt.ylabel('|1> probability', fontsize=14)
plt.suptitle(self.title, fontsize=14)
def annotation(self):
return r"$T_1$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["T1"], chr(177), self.fit_errors["T1"])
class RamseyFit(AuspexFit):
"""Fit to a Ramsey experiment using either a one or two frequency decaying
sine model.
"""
xlabel = "Delay"
ylabel = r"<$\sigma_z$>"
title = "Ramsey Fit"
def __init__(self, xpts, ypts, two_freqs=True, AIC=True, make_plots=False, force=False, ax=None):
"""One or two frequency Ramsey experiment fit. If a two-frequency fit is selected
by the user or by comparing AIC scores, fit parameters are returned as tuples instead
of single numbers.
Args:
xpts (numpy.array): Time data points.
ypts (numpy.array): Qubit measurements.
two_freqs (Bool): If true, attempt a two-frequency fit of the data.
AIC (Bool): Decide between one and two frequency fits using the Akaike
information criterion.
make_plots (Bool): Display a plot of data and fit result.
ax (Axes, optional): Axes on which to draw plot. If None, new figure is created
force (Bool): Force the selection of a two-frequency fit regardless of AIC score.
"""
self.AIC = AIC
self.dict_option = two_freqs
self.two_freqs = two_freqs
self.force = force
self.plots = make_plots
self.ax = ax
assert len(xpts) == len(ypts), "Length of X and Y points must match!"
self.xpts = xpts
self.ypts = ypts
self._do_fit()
def _initial_guess_1f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 1)
return [freqs[0], abs(amps[0]), Tcs[0], np.angle(amps[0]), np.mean(self.ypts)]
def _initial_guess_2f(self):
freqs, Tcs, amps = KT_estimation(self.ypts-np.mean(self.ypts), self.xpts, 2)
return [*freqs, *abs(amps), *Tcs, *np.angle(amps), np.mean(self.ypts)]
@staticmethod
def _ramsey_1f(x, f, A, tau, phi, y0):
return A*np.exp(-x/tau)*np.cos(2*np.pi*f*x + phi) + y0
@staticmethod
def _model_2f(x, *p):
return (RamseyFit._ramsey_1f(x, p[0], p[2], p[4], p[6], p[8]) + RamseyFit._ramsey_1f(x, p[1], p[3], p[5], p[7], p[8]))
@staticmethod
def _model_1f(x, *p):
return RamseyFit._ramsey_1f(x, p[0], p[1], p[2], p[3], p[4])
def _aicc(self, e, k, n):
return 2*k+e+(2*k*(k+1))/(n-k-1)
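    # Corrected (small-sample) Akaike information criterion:
    # AICc = 2k + e + 2k(k+1)/(n - k - 1), where e is the goodness-of-fit
    # term, k the number of fit parameters (5 for the one-frequency model,
    # 9 for two frequencies) and n the number of data points.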
def _do_fit(self):
if self.two_freqs:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
try:
super()._do_fit()
two_freq_chi2 = self.sq_error
except:
self.two_freqs = False
logger.info("Two-frequency fit failed. Trying single-frequency fit.")
if self.two_freqs and self.AIC:
#Compare the one and two frequency fits
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
one_freq_chi2 = self.sq_error
aic = self._aicc(two_freq_chi2, 9, len(self.xpts)) - self._aicc(one_freq_chi2, 5, len(self.xpts))
if aic > 0 and not self.force:
self.two_freqs = False
rl = 100*np.exp(-aic/2)
logger.info(f"Selecting one-frequency fit with relative likelihood = {rl:.2f}%")
if rl>33:
logger.info("Relative likelihood of 2nd frequency high, take more averages or set force = True.")
else:
self.dict_option = True
self._initial_guess = self._initial_guess_2f
self._model = self._model_2f
super()._do_fit()
if not self.two_freqs:
self.dict_option = False
self._initial_guess = self._initial_guess_1f
self._model = self._model_1f
super()._do_fit()
if self.plots:
self.make_plots()
def annotation(self):
if self.two_freqs:
return r"$T_2$ = {0:.2e} {1} {2:.2e} "'\n'"$T_2$ = {3:.2e} {4} {5:.2e}".format(self.fit_params["tau1"], chr(177), self.fit_errors["tau1"], self.fit_params["tau2"], chr(177), self.fit_errors["tau2"])
else:
return r"$T_2$ = {0:.2e} {1} {2:.2e}".format(self.fit_params["tau"], chr(177), self.fit_errors["tau"])
@property
def T2(self):
if self.two_freqs:
return self.fit_params["tau1"], self.fit_params["tau2"]
else:
return self.fit_params["tau"]
@property
def ramsey_freq(self):
if self.two_freqs:
return self.fit_params["f1"], self.fit_params["f2"]
else:
return self.fit_params["f"]
def _fit_dict(self, p):
if self.dict_option:
return {"f1": p[0],
"A1": p[2],
"tau1": p[4],
"phi1": p[6],
"f2": p[1],
"A2": p[3],
"tau2": p[5],
"phi2": p[7],
"y0": p[8]}
else:
return {"f": p[0],
"A": p[1],
"tau": p[2],
"phi": p[3],
"y0": p[4]}
class SingleQubitRBFit(AuspexFit):
"""Fit to an RB decay curve using the model A*(r^n) + B
"""
ylabel = r"<$\sigma_z$>"
title = "Single Qubit RB Fit"
def __init__(self, lengths, data, make_plots=False, log_scale_x=True, smart_guess=True, bounded_fit=True, ax=None):
self.lengths = sorted(list(set(lengths)))
repeats = len(data) // len(self.lengths)
xpts = np.array(self.lengths)
ypts = np.mean(np.reshape(data,(len(self.lengths),repeats)),1)
self.data = data
self.data_points = np.reshape(data,(len(self.lengths),repeats))
self.errors = np.std(self.data_points, 1)
self.log_scale_x = log_scale_x
self.ax = ax
self.smart_guess = smart_guess
if log_scale_x:
self.xlabel = r"$log_2$ Clifford Number"
else:
self.xlabel = "Clifford Number"
if bounded_fit:
self.bounds = ((0, -np.inf, 0), (1, np.inf, 1))
super().__init__(xpts, ypts, make_plots=make_plots, ax=ax)
@staticmethod
def _model(x, *p):
return p[0] * (1-p[1])**x + p[2]
def _initial_guess(self):
if self.smart_guess:
## Initial guess using method of linear regression via integral equations
## https://www.scribd.com/doc/14674814/Regressions-et-equations-integrales
N = len(self.xpts)
S = np.zeros(N)
for j in range(2, N):
S[j] = S[j-1] + 0.5*((self.ypts[j] + self.ypts[j-1]) *
(self.xpts[j] - self.xpts[j-1]))
xs = self.xpts - self.xpts[0]
ys = self.ypts - self.ypts[0]
M = np.array([[np.sum(xs**2), np.sum(xs * S)],
                          [np.sum(xs * S), np.sum(S**2)]])
from typing import List
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import pandas as pd
from modules.CD_parameters import *
from modules.utilities import *
# check if the output directory exists
check_dir("".join((project_dir, '/figures/foo.eps')))
def flatten_list(list_of_lists: List) -> np.ndarray:
return np.array([item for sub_list in list_of_lists for item in sub_list])
start_line_number = 93 # The first line
end_line_number = 93 # The last line
following_the_spectra_catlogue = True # Read spectrumIDs or read SampleIDs first?
rows = np.array(range(start_line_number, end_line_number + 1)) - 2
# Read the files
if following_the_spectra_catlogue:
Sample_catalogue = pd.read_excel("".join((path_relab, 'Sample_Catalogue.xlsx')), index_col=None,
na_values=['NA'],
usecols="A, C, Ag", engine='openpyxl').to_numpy()
Spectra_catalogue = pd.read_excel("".join((path_relab, 'Spectra_Catalogue.xlsx')), index_col=None, na_values=['NA'],
usecols="A, B, F:H", engine='openpyxl').to_numpy()[rows]
SpectrumIDs = Spectra_catalogue[:, 0]
SampleIDs = Spectra_catalogue[:, 1]
# Range for interpolation
Start = np.array(Spectra_catalogue[:, 2])
Stop = np.array(Spectra_catalogue[:, 3])
Step = np.array(Spectra_catalogue[:, 4])
Weathering = flatten_list([Sample_catalogue[np.where(Sample_catalogue[:, 0] == SampleID)[0], 2]
for SampleID in SampleIDs])
else:
Sample_catalogue = pd.read_excel("".join((path_relab, 'Sample_Catalogue.xlsx')), index_col=None, na_values=['NA'],
usecols="A, C, Ag", engine='openpyxl').to_numpy()[rows]
Spectra_catalogue = pd.read_excel("".join((path_relab, 'Spectra_Catalogue.xlsx')), index_col=None,
na_values=['NA'], usecols="A, B, F:H", engine='openpyxl').to_numpy()
SampleIDs = np.array(Sample_catalogue[:, 0])
# Find Spectrum ID
SpectrumIDs = flatten_list([Spectra_catalogue[np.where(Spectra_catalogue[:, 1] == SampleID)[0], 0]
for SampleID in SampleIDs])
# Range for interpolation
Start = flatten_list([Spectra_catalogue[np.where(Spectra_catalogue[:, 1] == SampleID)[0], 2]
for SampleID in SampleIDs])
Stop = flatten_list([Spectra_catalogue[np.where(Spectra_catalogue[:, 1] == SampleID)[0], 3]
for SampleID in SampleIDs])
Step = flatten_list([Spectra_catalogue[np.where(Spectra_catalogue[:, 1] == SampleID)[0], 4]
for SampleID in SampleIDs])
    # Find samples for the spectra (this is necessary because multiple spectra can share the same SampleID)
SampleIDs = flatten_list([Spectra_catalogue[np.where(Spectra_catalogue[:, 0] == SpectrumID)[0], 1]
for SpectrumID in SpectrumIDs])
Weathering = flatten_list([Sample_catalogue[np.where(Sample_catalogue[:, 0] == SampleID)[0], 2]
for SampleID in SampleIDs])
# Take only these spectra
mask = np.array(np.where((Start <= lambda_min) & (Stop >= lambda_max) & (Step <= resolution_max))).ravel()
SpectrumIDs = SpectrumIDs[mask]
SampleIDs = SampleIDs[mask]
X = np.arange(lambda_min, lambda_max + resolution_final / 2, resolution_final)
# Find corresponding PIs
PIs = flatten_list([Sample_catalogue[np.where(Sample_catalogue[:, 0] == SampleID)[0], 1] for SampleID in SampleIDs])
"""
# Sorting
idx = np.argsort(SampleIDs)
SampleIDs = SampleIDs[idx]
SpectrumIDs = SpectrumIDs[idx]
PIs = PIs[idx]
"""
if denoise:
width = 9
    cent = np.int(np.round(width / 2))
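    # Hedged sketch (the source snippet ends here): a typical continuation
    # would build a width-9 boxcar kernel centred at `cent` and convolve each
    # spectrum with it; the names below are hypothetical.
    # kernel = np.ones(width) / width
    # spectrum_smooth = np.convolve(spectrum, kernel, mode='same')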
import argparse
import numpy as np
import torch
import plyfile
import skimage.measure
from tqdm import tqdm
import yaml
import os.path as osp
import skimage
import time
def convert_sigma_samples_to_ply(
input_3d_sigma_array: np.ndarray,
voxel_grid_origin,
volume_size,
ply_filename_out,
level=5.0,
offset=None,
scale=None,):
"""
Convert density samples to .ply
    :param input_3d_sigma_array: a float array of shape (n,n,n) holding the density samples
    :param voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid
    :param volume_size: a list of three floats
    :param ply_filename_out: string, path of the filename to save to
    This function is adapted from: https://github.com/RobotLocomotion/spartan
"""
start_time = time.time()
verts, faces, normals, values = skimage.measure.marching_cubes(
input_3d_sigma_array, level=level, spacing=volume_size
)
# transform from voxel coordinates to camera coordinates
# note x and y are flipped in the output of marching_cubes
    mesh_points = np.zeros_like(verts)
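    # Hedged sketch of the usual continuation (the source snippet ends here):
    # shift the marching-cubes vertices into world coordinates before building
    # the plyfile element; all names mirror parameters defined above.
    # mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]
    # mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]
    # mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]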
import os
import pybullet as p
import numpy as np
import time
from itertools import product
from .utils import unit_pose, safe_zip, multiply, Pose, AABB, create_box, set_pose, get_all_links, LockRenderer, \
get_aabb, pairwise_link_collision, remove_body, draw_aabb, get_box_geometry, create_shape, create_body, STATIC_MASS, \
unit_quat, unit_point, CLIENT, create_shape_array, set_color, get_point, clip, load_model, TEMP_DIR, NULL_ID, elapsed_time
MAX_TEXTURE_WIDTH = 418 # max square dimension
MAX_PIXEL_VALUE = 255
MAX_LINKS = 125 # Max links seems to be 126
class VoxelGrid(object):
# https://github.mit.edu/caelan/ROS/blob/master/sparse_voxel_grid.py
# https://github.mit.edu/caelan/ROS/blob/master/base_navigation.py
# https://github.mit.edu/caelan/ROS/blob/master/utils.py
# https://github.mit.edu/caelan/ROS/blob/master/voxel_detection.py
# TODO: can always display the grid in RVIZ after filtering
# TODO: compute the maximum sized cuboid (rectangle) in a grid (matrix)
def __init__(self, resolutions, color=(1, 0, 0, 0.5)):
#def __init__(self, sizes, centers, pose=unit_pose()):
#assert len(sizes) == len(centers)
self.resolutions = resolutions
self.occupied = set()
self.world_from_grid = unit_pose() # TODO: support for real
self.color = color
#self.bodies = None
# TODO: store voxels more intelligently spatially
def __len__(self):
return len(self.occupied)
def voxel_from_point(self, point):
return tuple(np.floor(np.divide(point, self.resolutions)).astype(int))
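    # Example: with resolutions=(0.1, 0.1, 0.1), the point (0.25, -0.01, 0.0)
    # maps to voxel (2, -1, 0); np.floor sends negative coordinates toward
    # -inf, so the grid tiles space uniformly across the origin.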
def voxels_from_aabb(self, aabb):
lower_voxel, upper_voxel = map(self.voxel_from_point, aabb)
return map(tuple, product(*[range(l, u + 1) for l, u in safe_zip(lower_voxel, upper_voxel)]))
def lower_from_voxel(self, voxel):
return np.multiply(voxel, self.resolutions)
def center_from_voxel(self, voxel):
return self.lower_from_voxel(voxel) + self.resolutions/2.
def upper_from_voxel(self, voxel):
return self.lower_from_voxel(voxel) + self.resolutions
def pose_from_voxel(self, voxel):
return multiply(self.world_from_grid, Pose(self.center_from_voxel(voxel)))
def aabb_from_voxel(self, voxel):
return AABB(self.lower_from_voxel(voxel), self.upper_from_voxel(voxel))
def is_occupied(self, voxel):
return voxel in self.occupied
def set_occupied(self, voxel):
if self.is_occupied(voxel):
return False
self.occupied.add(voxel)
return True
def set_free(self, voxel):
if not self.is_occupied(voxel):
return False
self.occupied.remove(voxel)
return True
def get_neighbors(self, index):
for i in range(len(index)):
direction = np.zeros(len(index), dtype=int)
for n in (-1, +1):
direction[i] = n
yield tuple(np.array(index) + direction)
def get_clusters(self, voxels=None):
if voxels is None:
voxels = self.occupied
clusters = []
assigned = set()
def dfs(current):
if (current in assigned) or (not self.is_occupied(current)):
return []
cluster = [current]
assigned.add(current)
for neighbor in self.get_neighbors(current):
cluster.extend(dfs(neighbor))
return cluster
for voxel in voxels:
cluster = dfs(voxel)
if cluster:
clusters.append(cluster)
return clusters
# TODO: implicitly check collisions
def create_box(self):
color = (0, 0, 0, 0)
#color = None
box = create_box(*self.resolutions, color=color)
#set_color(box, color=color)
set_pose(box, self.world_from_grid)
return box
def get_affected(self, bodies, occupied):
assert self.world_from_grid == unit_pose()
check_voxels = {}
for body in bodies:
for link in get_all_links(body):
aabb = get_aabb(body, link) # TODO: pad using threshold
for voxel in self.voxels_from_aabb(aabb):
if self.is_occupied(voxel) == occupied:
check_voxels.setdefault(voxel, []).append((body, link))
return check_voxels
def check_collision(self, box, voxel, pairs, threshold=0.):
box_pairs = [(box, link) for link in get_all_links(box)]
set_pose(box, self.pose_from_voxel(voxel))
return any(pairwise_link_collision(body1, link1, body2, link2, max_distance=threshold)
for (body1, link1), (body2, link2) in product(pairs, box_pairs))
def add_point(self, point):
self.set_occupied(self.voxel_from_point(point))
def add_aabb(self, aabb):
for voxel in self.voxels_from_aabb(aabb):
self.set_occupied(voxel)
def add_bodies(self, bodies, threshold=0.):
# Otherwise, need to transform bodies
check_voxels = self.get_affected(bodies, occupied=False)
box = self.create_box()
for voxel, pairs in check_voxels.items(): # pairs typically only has one element
if self.check_collision(box, voxel, pairs, threshold=threshold):
self.set_occupied(voxel)
remove_body(box)
def remove_bodies(self, bodies, threshold=1e-2):
# TODO: could also just iterate over the voxels directly
check_voxels = self.get_affected(bodies, occupied=True)
box = self.create_box()
for voxel, pairs in check_voxels.items():
if self.check_collision(box, voxel, pairs, threshold=threshold):
self.set_free(voxel)
remove_body(box)
def draw_voxel_bodies(self):
# TODO: transform into the world frame
with LockRenderer():
handles = []
for voxel in sorted(self.occupied):
handles.extend(draw_aabb(self.aabb_from_voxel(voxel), color=self.color[:3]))
return handles
def create_voxel_bodies1(self):
start_time = time.time()
geometry = get_box_geometry(*self.resolutions)
collision_id, visual_id = create_shape(geometry, color=self.color)
bodies = []
for voxel in sorted(self.occupied):
body = create_body(collision_id, visual_id)
#scale = self.resolutions[0]
#body = load_model('models/voxel.urdf', fixed_base=True, scale=scale)
set_pose(body, self.pose_from_voxel(voxel))
bodies.append(body) # 0.0462474774444 / voxel
print(elapsed_time(start_time))
return bodies
def create_voxel_bodies2(self):
geometry = get_box_geometry(*self.resolutions)
collision_id, visual_id = create_shape(geometry, color=self.color)
ordered_voxels = sorted(self.occupied)
bodies = []
for start in range(0, len(ordered_voxels), MAX_LINKS):
voxels = ordered_voxels[start:start + MAX_LINKS]
body = p.createMultiBody(#baseMass=STATIC_MASS,
#baseCollisionShapeIndex=-1,
#baseVisualShapeIndex=-1,
#basePosition=unit_point(),
#baseOrientation=unit_quat(),
#baseInertialFramePosition=unit_point(),
#baseInertialFrameOrientation=unit_quat(),
linkMasses=len(voxels)*[STATIC_MASS],
linkCollisionShapeIndices=len(voxels)*[collision_id],
linkVisualShapeIndices=len(voxels)*[visual_id],
linkPositions=list(map(self.center_from_voxel, voxels)),
linkOrientations=len(voxels)*[unit_quat()],
linkInertialFramePositions=len(voxels)*[unit_point()],
linkInertialFrameOrientations=len(voxels)*[unit_quat()],
linkParentIndices=len(voxels)*[0],
linkJointTypes=len(voxels)*[p.JOINT_FIXED],
linkJointAxis=len(voxels)*[unit_point()],
physicsClientId=CLIENT)
set_pose(body, self.world_from_grid)
bodies.append(body) # 0.0163199263677 / voxel
return bodies
def create_voxel_bodies3(self):
ordered_voxels = sorted(self.occupied)
geoms = [get_box_geometry(*self.resolutions) for _ in ordered_voxels]
poses = list(map(self.pose_from_voxel, ordered_voxels))
#colors = [list(self.color) for _ in self.voxels] # TODO: colors don't work
colors = None
collision_id, visual_id = create_shape_array(geoms, poses, colors)
body = create_body(collision_id, visual_id) # Max seems to be 16
#dump_body(body)
set_color(body, self.color)
return [body]
def create_voxel_bodies(self):
with LockRenderer():
return self.create_voxel_bodies1()
#return self.create_voxel_bodies2()
#return self.create_voxel_bodies3()
def project2d(self):
# TODO: combine adjacent voxels into larger lines
# TODO: greedy algorithm that combines lines/boxes
tallest_voxel = {}
for i, j, k in self.occupied:
tallest_voxel[i, j] = max(k, tallest_voxel.get((i, j), k))
return {(i, j, k) for (i, j), k in tallest_voxel.items()}
def create_height_map(self, plane, plane_size, width=MAX_TEXTURE_WIDTH, height=MAX_TEXTURE_WIDTH):
min_z, max_z = 0., 2.
plane_extent = plane_size*np.array([1, 1, 0])
plane_lower = get_point(plane) - plane_extent/2.
#plane_aabb = (plane_lower, plane_lower + plane_extent)
#plane_aabb = get_aabb(plane) # TODO: bounding box is effectively empty
#plane_lower, plane_upper = plane_aabb
#plane_extent = (plane_upper - plane_lower)
image_size = np.array([width, height])
# TODO: fix width/height order
pixel_from_point = lambda point: np.floor(
image_size * (point - plane_lower)[:2] / plane_extent[:2]).astype(int)
# TODO: last row/col doesn't seem to be filled
height_map = np.zeros(image_size)
for voxel in self.project2d():
voxel_aabb = self.aabb_from_voxel(voxel)
#if not aabb_contains_aabb(aabb2d_from_aabb(voxel_aabb), aabb2d_from_aabb(plane_aabb)):
# continue
(x1, y1), (x2, y2) = map(pixel_from_point, voxel_aabb)
if (x1 < 0) or (width <= x2) or (y1 < 0) or (height <= y2):
continue
scaled_z = (clip(voxel_aabb[1][2], min_z, max_z) - min_z) / max_z
for c in range(x1, x2+1):
for y in range(y1, y2+1):
r = height - y - 1 # TODO: can also just set in bulk if using height_map
height_map[r, c] = max(height_map[r, c], scaled_z)
return height_map
def create_textured_square(size, color=None,
width=MAX_TEXTURE_WIDTH, height=MAX_TEXTURE_WIDTH):
body = load_model('models/square.urdf', scale=size)
if color is not None:
set_color(body, color)
path = os.path.join(TEMP_DIR, 'texture.png')
image = MAX_PIXEL_VALUE*np.ones((width, height, 3), dtype=np.uint8)
    # NOTE: scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is a
    # drop-in replacement (assumes the imageio package is available).
    import imageio
    imageio.imwrite(path, image)
texture = p.loadTexture(path)
p.changeVisualShape(body, NULL_ID, textureUniqueId=texture, physicsClientId=CLIENT)
return body, texture
def set_texture(texture, image):
# Alias/WaveFront Material (.mtl) File Format
# https://people.cs.clemson.edu/~dhouse/courses/405/docs/brief-mtl-file-format.html
#print(get_visual_data(body))
width, height, channels = image.shape
pixels = image.flatten().tolist()
assert len(pixels) <= 524288
# b3Printf: uploadBulletFileToSharedMemory 747003 exceeds max size 524288
p.changeTexture(texture, pixels, width, height, physicsClientId=CLIENT)
# TODO: it's important that width and height are the same as the original
def rgb_interpolate(grey_image, min_color, max_color):
width, height = grey_image.shape
channels = 3
    rgb_image = np.zeros((width, height, channels), dtype=np.uint8)
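    # The source truncates here; the remainder is a hedged reconstruction.
    # It assumes min_color/max_color are length-3 RGB arrays in [0, 255] and
    # that grey_image values lie in [0, 1], blending the two colors linearly.
    min_color = np.asarray(min_color, dtype=float)
    max_color = np.asarray(max_color, dtype=float)
    t = grey_image[..., np.newaxis]  # broadcast each grey value over channels
    rgb_image[...] = ((1. - t)*min_color + t*max_color).astype(np.uint8)
    return rgb_image
def demo_height_map_texture(voxel_grid, plane_size=10.):
    # Hedged usage sketch (not in the original file): bake a voxel grid's
    # height map into the ground-plane texture using the helpers above. The
    # plane size and the blue-to-red color ramp are illustrative only.
    plane, texture = create_textured_square(plane_size)
    height_map = voxel_grid.create_height_map(plane, plane_size)
    image = rgb_interpolate(height_map,
                            min_color=np.array([0, 0, 255]),  # low -> blue
                            max_color=np.array([255, 0, 0]))  # high -> red
    set_texture(texture, image)
    return plane, texture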
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import tempfile
import warnings
import numpy
from numpy import testing as npt
import tables
from tables import Atom, ClosedNodeError, NoSuchNodeError
from tables.utils import byteorders
from tables.tests import common
from tables.tests.common import allequal
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
from six.moves import range
#warnings.resetwarnings()
class BasicTestCase(TestCase):
"""Basic test for all the supported typecodes present in numpy.
    All of them are included in PyTables.
"""
endiancheck = False
def write_read(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_out_arg(self, testarray):
a = testarray
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
a = b
fileh.create_array(root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = numpy.empty_like(a, dtype=a.dtype)
root.somearray.read(out=b)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(abo, bbo)
if self.endiancheck:
self.assertNotEqual(bbo, sys.byteorder)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def write_read_atom_shape_args(self, testarray):
a = testarray
atom = Atom.from_dtype(a.dtype)
shape = a.shape
byteorder = None
if common.verbose:
print('\n', '-=' * 30)
print("Running test for array with type '%s'" % a.dtype.type,
end=' ')
print("for class check:", self.title)
# Create an instance of HDF5 file
filename = tempfile.mktemp(".h5")
try:
with tables.open_file(filename, mode="w") as fileh:
root = fileh.root
# Create the array under root and name 'somearray'
if self.endiancheck and a.dtype.kind != "S":
b = a.byteswap()
b.dtype = a.dtype.newbyteorder()
if b.dtype.byteorder in ('>', '<'):
byteorder = byteorders[b.dtype.byteorder]
a = b
ptarr = fileh.create_array(root, 'somearray',
atom=atom, shape=shape,
title="Some array",
# specify the byteorder explicitly
# since there is no way to deduce
# it in this case
byteorder=byteorder)
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
root = fileh.root
# Read the saved array
b = root.somearray.read()
# Compare them. They should be equal.
if common.verbose and not allequal(a, b):
print("Write and read arrays differ!")
# print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
# print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
if a.dtype.kind != "S":
print("Array written byteorder:", a.dtype.byteorder)
print("Array read byteorder:", b.dtype.byteorder)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, root.somearray.shape)
if a.dtype.kind == "S":
self.assertEqual(root.somearray.atom.type, "string")
else:
self.assertEqual(a.dtype.type, b.dtype.type)
self.assertEqual(a.dtype.type,
root.somearray.atom.dtype.type)
abo = byteorders[a.dtype.byteorder]
bbo = byteorders[b.dtype.byteorder]
if abo != "irrelevant":
self.assertEqual(abo, root.somearray.byteorder)
self.assertEqual(bbo, sys.byteorder)
if self.endiancheck:
self.assertNotEqual(bbo, abo)
obj = root.somearray
self.assertEqual(obj.flavor, 'numpy')
self.assertEqual(obj.shape, a.shape)
self.assertEqual(obj.ndim, a.ndim)
self.assertEqual(obj.chunkshape, None)
if a.shape:
nrows = a.shape[0]
else:
# scalar
nrows = 1
self.assertEqual(obj.nrows, nrows)
self.assertTrue(allequal(a, b))
finally:
# Then, delete the file
os.remove(filename)
def setup00_char(self):
"""Data integrity during recovery (character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
return a
def test00_char(self):
a = self.setup00_char()
self.write_read(a)
def test00_char_out_arg(self):
a = self.setup00_char()
self.write_read_out_arg(a)
def test00_char_atom_shape_args(self):
a = self.setup00_char()
self.write_read_atom_shape_args(a)
def test00b_char(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = fileh.root.somearray.read()
if isinstance(a, bytes):
self.assertEqual(type(b), bytes)
self.assertEqual(a, b)
else:
# If a is not a python string, then it should be a list
# or ndarray
self.assertTrue(type(b) in [list, numpy.ndarray])
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_out_arg(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
fileh.create_array(fileh.root, 'somearray', a, "Some array")
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
                    self.assertIsInstance(b, numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def test00b_char_atom_shape_args(self):
"""Data integrity during recovery (string objects)"""
a = self.tupleChar
filename = tempfile.mktemp(".h5")
try:
# Create an instance of HDF5 file
with tables.open_file(filename, mode="w") as fileh:
nparr = numpy.asarray(a)
atom = Atom.from_dtype(nparr.dtype)
shape = nparr.shape
if nparr.dtype.byteorder in ('>', '<'):
byteorder = byteorders[nparr.dtype.byteorder]
else:
byteorder = None
ptarr = fileh.create_array(fileh.root, 'somearray',
atom=atom, shape=shape,
byteorder=byteorder,
title="Some array")
self.assertEqual(shape, ptarr.shape)
self.assertEqual(atom, ptarr.atom)
ptarr[...] = a
# Re-open the file in read-only mode
with tables.open_file(filename, mode="r") as fileh:
# Read the saved array
b = numpy.empty_like(a)
if fileh.root.somearray.flavor != 'numpy':
self.assertRaises(TypeError,
lambda: fileh.root.somearray.read(out=b))
else:
fileh.root.somearray.read(out=b)
                    self.assertIsInstance(b, numpy.ndarray)
finally:
# Then, delete the file
os.remove(filename)
def setup01_char_nc(self):
"""Data integrity during recovery (non-contiguous character objects)"""
if not isinstance(self.tupleChar, numpy.ndarray):
a = numpy.array(self.tupleChar, dtype="S")
else:
a = self.tupleChar
if a.ndim == 0:
b = a.copy()
else:
b = a[::2]
# Ensure that this numpy string is non-contiguous
if len(b) > 1:
self.assertEqual(b.flags.contiguous, False)
return b
def test01_char_nc(self):
b = self.setup01_char_nc()
self.write_read(b)
def test01_char_nc_out_arg(self):
b = self.setup01_char_nc()
self.write_read_out_arg(b)
def test01_char_nc_atom_shape_args(self):
b = self.setup01_char_nc()
self.write_read_atom_shape_args(b)
def test02_types(self):
"""Data integrity during recovery (numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128']
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
self.write_read(a)
b = numpy.array(self.tupleInt, typecode)
self.write_read_out_arg(b)
c = numpy.array(self.tupleInt, typecode)
self.write_read_atom_shape_args(c)
def test03_types_nc(self):
"""Data integrity during recovery (non-contiguous numerical types)"""
typecodes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64',
'complex64', 'complex128', ]
for name in ('float16', 'float96', 'float128',
'complex192', 'complex256'):
atomname = name.capitalize() + 'Atom'
if hasattr(tables, atomname):
typecodes.append(name)
for typecode in typecodes:
a = numpy.array(self.tupleInt, typecode)
if a.ndim == 0:
b1 = a.copy()
b2 = a.copy()
b3 = a.copy()
else:
b1 = a[::2]
b2 = a[::2]
b3 = a[::2]
# Ensure that this array is non-contiguous
if len(b1) > 1:
self.assertEqual(b1.flags.contiguous, False)
if len(b2) > 1:
self.assertEqual(b2.flags.contiguous, False)
if len(b3) > 1:
self.assertEqual(b3.flags.contiguous, False)
self.write_read(b1)
self.write_read_out_arg(b2)
self.write_read_atom_shape_args(b3)
class Basic0DOneTestCase(BasicTestCase):
# Scalar case
title = "Rank-0 case 1"
tupleInt = 3
tupleChar = b"3"
endiancheck = True
class Basic0DTwoTestCase(BasicTestCase):
# Scalar case
title = "Rank-0 case 2"
tupleInt = 33
tupleChar = b"33"
endiancheck = True
class Basic1DZeroTestCase(BasicTestCase):
# This test case is not supported by PyTables (HDF5 limitations)
# 1D case
title = "Rank-1 case 0"
tupleInt = ()
tupleChar = ()
endiancheck = False
class Basic1DOneTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 1"
tupleInt = (3,)
tupleChar = (b"a",)
endiancheck = True
class Basic1DTwoTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 2"
tupleInt = (3, 4)
tupleChar = (b"aaa",)
endiancheck = True
class Basic1DThreeTestCase(BasicTestCase):
# 1D case
title = "Rank-1 case 3"
tupleInt = (3, 4, 5)
tupleChar = (b"aaa", b"bbb",)
endiancheck = True
class Basic2DOneTestCase(BasicTestCase):
# 2D case
title = "Rank-2 case 1"
tupleInt = numpy.array(numpy.arange((4)**2))
tupleInt.shape = (4,)*2
tupleChar = numpy.array(["abc"]*3**2, dtype="S3")
tupleChar.shape = (3,)*2
endiancheck = True
class Basic2DTwoTestCase(BasicTestCase):
# 2D case, with a multidimensional dtype
title = "Rank-2 case 2"
tupleInt = numpy.array(numpy.arange((4)), dtype=(numpy.int_, (4,)))
tupleChar = numpy.array(["abc"]*3, dtype=("S3", (3,)))
endiancheck = True
class Basic10DTestCase(BasicTestCase):
# 10D case
title = "Rank-10 test"
tupleInt = numpy.array(numpy.arange((2)**10))
tupleInt.shape = (2,)*10
tupleChar = numpy.array(
["abc"]*2**10, dtype="S3")
tupleChar.shape = (2,)*10
endiancheck = True
class Basic32DTestCase(BasicTestCase):
# 32D case (maximum)
title = "Rank-32 test"
tupleInt = numpy.array((32,))
tupleInt.shape = (1,)*32
tupleChar = numpy.array(["121"], dtype="S3")
tupleChar.shape = (1,)*32
class ReadOutArgumentTests(common.TempFileMixin, TestCase):
def setUp(self):
super(ReadOutArgumentTests, self).setUp()
self.size = 1000
def create_array(self):
array = numpy.arange(self.size, dtype='f8')
disk_array = self.h5file.create_array('/', 'array', array)
return array, disk_array
def test_read_entire_array(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
disk_array.read(out=out_buffer)
numpy.testing.assert_equal(out_buffer, array)
def test_read_contiguous_slice1(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 2
disk_array.read(start=start, stop=self.size, out=out_buffer[start:])
numpy.testing.assert_equal(out_buffer[start:], array[start:])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
def test_read_contiguous_slice2(self):
array, disk_array = self.create_array()
out_buffer = numpy.arange(self.size, dtype='f8')
out_buffer = numpy.random.permutation(out_buffer)
out_buffer_orig = out_buffer.copy()
start = self.size // 4
stop = self.size - start
disk_array.read(start=start, stop=stop, out=out_buffer[start:stop])
numpy.testing.assert_equal(out_buffer[start:stop], array[start:stop])
numpy.testing.assert_equal(out_buffer[:start], out_buffer_orig[:start])
numpy.testing.assert_equal(out_buffer[stop:], out_buffer_orig[stop:])
def test_read_non_contiguous_slice_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), dtype='f8')
disk_array.read(start=0, stop=self.size, step=2, out=out_buffer)
numpy.testing.assert_equal(out_buffer, array[0:self.size:2])
def test_read_non_contiguous_buffer(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size, ), 'f8')
out_buffer_slice = out_buffer[0:self.size:2]
# once Python 2.6 support is dropped, this could change
# to assertRaisesRegexp to check exception type and message at once
self.assertRaises(ValueError, disk_array.read, 0, self.size, 2,
out_buffer_slice)
try:
disk_array.read(0, self.size, 2, out_buffer_slice)
except ValueError as exc:
self.assertEqual('output array not C contiguous', str(exc))
def test_buffer_too_small(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size // 2, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
def test_buffer_too_large(self):
array, disk_array = self.create_array()
out_buffer = numpy.empty((self.size + 1, ), 'f8')
self.assertRaises(ValueError, disk_array.read, 0, self.size, 1,
out_buffer)
try:
disk_array.read(0, self.size, 1, out_buffer)
except ValueError as exc:
self.assertTrue('output array size invalid, got' in str(exc))
class SizeOnDiskInMemoryPropertyTestCase(common.TempFileMixin, TestCase):
def setUp(self):
super(SizeOnDiskInMemoryPropertyTestCase, self).setUp()
self.array_size = (10, 10)
self.array = self.h5file.create_array(
'/', 'somearray', numpy.zeros(self.array_size, 'i4'))
def test_all_zeros(self):
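        # Worked check (added): a 10x10 int32 array stores 100 elements * 4
        # bytes = 400 bytes; plain Arrays are uncompressed, so size_on_disk
        # and size_in_memory match exactly.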
self.assertEqual(self.array.size_on_disk, 10 * 10 * 4)
self.assertEqual(self.array.size_in_memory, 10 * 10 * 4)
class UnalignedAndComplexTestCase(common.TempFileMixin, TestCase):
"""Basic test for all the supported typecodes present in numpy.
    Most of them are included in PyTables.
"""
def setUp(self):
super(UnalignedAndComplexTestCase, self).setUp()
self.root = self.h5file.root
def write_read(self, testArray):
if common.verbose:
print('\n', '-=' * 30)
print("\nRunning test for array with type '%s'" %
testArray.dtype.type)
# Create the array under root and name 'somearray'
a = testArray
if self.endiancheck:
byteorder = {"little": "big", "big": "little"}[sys.byteorder]
else:
byteorder = sys.byteorder
self.h5file.create_array(self.root, 'somearray', a, "Some array",
byteorder=byteorder)
if self.reopen:
self._reopen()
self.root = self.h5file.root
# Read the saved array
b = self.root.somearray.read()
# Get an array to be compared in the correct byteorder
c = a.newbyteorder(byteorder)
# Compare them. They should be equal.
if not allequal(c, b) and common.verbose:
print("Write and read arrays differ!")
print("Array written:", a)
print("Array written shape:", a.shape)
print("Array written itemsize:", a.itemsize)
print("Array written type:", a.dtype.type)
print("Array read:", b)
print("Array read shape:", b.shape)
print("Array read itemsize:", b.itemsize)
print("Array read type:", b.dtype.type)
# Check strictly the array equality
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.shape, self.root.somearray.shape)
if a.dtype.byteorder != "|":
self.assertEqual(a.dtype, b.dtype)
self.assertEqual(a.dtype, self.root.somearray.atom.dtype)
self.assertEqual(byteorders[b.dtype.byteorder], sys.byteorder)
self.assertEqual(self.root.somearray.byteorder, byteorder)
self.assertTrue(allequal(c, b))
def test01_signedShort_unaligned(self):
"""Checking an unaligned signed short integer array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f2"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.int16)
self.write_read(a)
def test02_float_unaligned(self):
"""Checking an unaligned single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,f4,i2', shape=10)
a = r["f1"]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, 0)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test03_byte_offset(self):
"""Checking an offsetted byte array"""
r = numpy.arange(100, dtype=numpy.int8)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test04_short_offset(self):
"""Checking an offsetted unsigned short int precision array"""
r = numpy.arange(100, dtype=numpy.uint32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test05_int_offset(self):
"""Checking an offsetted integer array"""
r = numpy.arange(100, dtype=numpy.int32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test06_longlongint_offset(self):
"""Checking an offsetted long long integer array"""
r = numpy.arange(100, dtype=numpy.int64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test07_float_offset(self):
"""Checking an offsetted single precision array"""
r = numpy.arange(100, dtype=numpy.float32)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test08_double_offset(self):
"""Checking an offsetted double precision array"""
r = numpy.arange(100, dtype=numpy.float64)
r.shape = (10, 10)
a = r[2]
self.write_read(a)
def test09_float_offset_unaligned(self):
"""Checking an unaligned and offsetted single precision array"""
r = numpy.rec.array(b'a'*200, formats='i1,3f4,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float32)
self.write_read(a)
def test10_double_offset_unaligned(self):
"""Checking an unaligned and offsetted double precision array"""
r = numpy.rec.array(b'a'*400, formats='i1,3f8,i2', shape=10)
a = r["f1"][3]
# Ensure that this array is non-aligned
self.assertEqual(a.flags.aligned, False)
self.assertEqual(a.dtype.type, numpy.float64)
self.write_read(a)
def test11_int_byteorder(self):
"""Checking setting data with different byteorder in a range
(integer)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.int32).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (int)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
def test12_float_byteorder(self):
"""Checking setting data with different byteorder in a range (float)"""
# Save an array with the reversed byteorder on it
a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
a = a.byteswap()
a = a.newbyteorder()
array = self.h5file.create_array(
self.h5file.root, 'array', a, "byteorder (float)")
# Read a subarray (got an array with the machine byteorder)
b = array[2:4, 3:5]
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
b = b.byteswap()
b = b.newbyteorder()
# Set this subarray back to the array
array[2:4, 3:5] = b
# Check that the array is back in the correct byteorder
c = array[...]
if common.verbose:
print("byteorder of array on disk-->", array.byteorder)
print("byteorder of subarray-->", b.dtype.byteorder)
print("subarray-->", b)
print("retrieved array-->", c)
self.assertTrue(allequal(a, c))
class ComplexNotReopenNotEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = False
reopen = False
class ComplexReopenNotEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = False
reopen = True
class ComplexNotReopenEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = True
reopen = False
class ComplexReopenEndianTestCase(UnalignedAndComplexTestCase):
endiancheck = True
reopen = True
class GroupsArrayTestCase(common.TempFileMixin, TestCase):
"""This test class checks combinations of arrays with groups."""
def test00_iterativeGroups(self):
"""Checking combinations of arrays with groups."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test00_iterativeGroups..." %
self.__class__.__name__)
# Get the root group
group = self.h5file.root
# Set the type codes to test
        # The typecodes below expose an ambiguity that is reported in:
# http://projects.scipy.org/scipy/numpy/ticket/283 and
# http://projects.scipy.org/scipy/numpy/ticket/290
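        # Hedged illustration (added): on LP64 platforms both 'l' (long) and
        # 'q' (long long) map to 64-bit integers, so
        #   numpy.dtype('l') == numpy.dtype('q')   # True on LP64
        # and the two typecodes cannot be told apart once written to disk.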
typecodes = ['b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'f', 'd',
'F', 'D']
if hasattr(tables, 'Float16Atom'):
typecodes.append('e')
if hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):
typecodes.append('g')
if (hasattr(tables, 'Complex192Atom') or
hasattr(tables, 'Complex256Atom')):
typecodes.append('G')
for i, typecode in enumerate(typecodes):
a = numpy.ones((3,), typecode)
dsetname = 'array_' + typecode
if common.verbose:
print("Creating dataset:", group._g_join(dsetname))
self.h5file.create_array(group, dsetname, a, "Large array")
group = self.h5file.create_group(group, 'group' + str(i))
# Reopen the file
self._reopen()
# Get the root group
group = self.h5file.root
        # Get the metadata on the previously saved arrays
for i in range(len(typecodes)):
# Create an array for later comparison
a = numpy.ones((3,), typecodes[i])
# Get the dset object hanging from group
dset = getattr(group, 'array_' + typecodes[i])
# Get the actual array
b = dset.read()
if common.verbose:
print("Info from dataset:", dset._v_pathname)
print(" shape ==>", dset.shape, end=' ')
print(" type ==> %s" % dset.atom.dtype)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %s" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# Iterate over the next group
group = getattr(group, 'group' + str(i))
def test01_largeRankArrays(self):
"""Checking creation of large rank arrays (0 < rank <= 32)
        It also uses array ranks ranging up to maxrank.
"""
        # maximum level of recursion (deepest group level) achieved:
        # maxrank = 32 (for an effective maximum rank of 32)
# This limit is due to HDF5 library limitations.
minrank = 1
maxrank = 32
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_largeRankArrays..." %
self.__class__.__name__)
print("Maximum rank for tested arrays:", maxrank)
group = self.h5file.root
if common.verbose:
print("Rank array writing progress: ", end=' ')
for rank in range(minrank, maxrank + 1):
            # Create an array of integers, with incrementally bigger ranks
a = numpy.ones((1,) * rank, numpy.int32)
if common.verbose:
print("%3d," % (rank), end=' ')
self.h5file.create_array(group, "array", a, "Rank: %s" % rank)
group = self.h5file.create_group(group, 'group' + str(rank))
# Reopen the file
self._reopen()
group = self.h5file.root
if common.verbose:
print()
print("Rank array reading progress: ")
        # Get the metadata on the previously saved arrays
for rank in range(minrank, maxrank + 1):
# Create an array for later comparison
a = numpy.ones((1,) * rank, numpy.int32)
# Get the actual array
b = group.array.read()
if common.verbose:
print("%3d," % (rank), end=' ')
if common.verbose and not allequal(a, b):
print("Info from dataset:", group.array._v_pathname)
print(" Shape: ==>", group.array.shape, end=' ')
print(" typecode ==> %c" % group.array.typecode)
print("Array b read from file. Shape: ==>", b.shape, end=' ')
print(". Type ==> %c" % b.dtype)
self.assertEqual(a.shape, b.shape)
self.assertEqual(a.dtype, b.dtype)
self.assertTrue(allequal(a, b))
# print(self.h5file)
# Iterate over the next group
group = self.h5file.get_node(group, 'group' + str(rank))
if common.verbose:
            print()  # This flushes the stdout buffer
class CopyTestCase(common.TempFileMixin, TestCase):
def test01_copy(self):
"""Checking Array.copy() method."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Copy to another Array
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
# print("dirs-->", dir(array1), dir(array2))
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.title, array2.title)
def test02_copy(self):
"""Checking Array.copy() method (where specified)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Copy to another Array
group1 = self.h5file.create_group("/", "group1")
array2 = array1.copy(group1, 'array2')
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.group1.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
# print("dirs-->", dir(array1), dir(array2))
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.title, array2.title)
def test03_copy(self):
"""Checking Array.copy() method (checking title copying)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', title="title array2")
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
# Assert user attributes
if common.verbose:
print("title of destination array-->", array2.title)
self.assertEqual(array2.title, "title array2")
def test04_copy(self):
"""Checking Array.copy() method (user attributes copied)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=1)
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Assert user attributes
self.assertEqual(array2.attrs.attr1, "attr1")
self.assertEqual(array2.attrs.attr2, 2)
def test04b_copy(self):
"""Checking Array.copy() method (user attributes not copied)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05b_copy..." % self.__class__.__name__)
# Create an Array
arr = numpy.array([[456, 2], [3, 457]], dtype='int16')
array1 = self.h5file.create_array(
self.h5file.root, 'array1', arr, "title array1")
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=0)
if self.close:
if common.verbose:
print("(closing file version)")
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Assert user attributes
self.assertEqual(hasattr(array2.attrs, "attr1"), 0)
self.assertEqual(hasattr(array2.attrs, "attr2"), 0)
class CloseCopyTestCase(CopyTestCase):
close = 1
class OpenCopyTestCase(CopyTestCase):
close = 0
class CopyIndexTestCase(common.TempFileMixin, TestCase):
def test01_index(self):
"""Checking Array.copy() method with indexes."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_index..." % self.__class__.__name__)
        # Create a numpy array
r = numpy.arange(200, dtype='int32')
r.shape = (100, 2)
        # Save it in an array:
array1 = self.h5file.create_array(
self.h5file.root, 'array1', r, "title array1")
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print("nrows in array2-->", array2.nrows)
print("and it should be-->", r2.shape[0])
self.assertEqual(r2.shape[0], array2.nrows)
def test02_indexclosef(self):
"""Checking Array.copy() method with indexes (close file version)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_indexclosef..." % self.__class__.__name__)
        # Create a numpy array
r = numpy.arange(200, dtype='int32')
r.shape = (100, 2)
        # Save it in an array:
array1 = self.h5file.create_array(
self.h5file.root, 'array1', r, "title array1")
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
# Close and reopen the file
self._reopen()
array1 = self.h5file.root.array1
array2 = self.h5file.root.array2
if common.verbose:
print("array1-->", array1.read())
print("array2-->", array2.read())
print("attrs array1-->", repr(array1.attrs))
print("attrs array2-->", repr(array2.attrs))
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print("nrows in array2-->", array2.nrows)
print("and it should be-->", r2.shape[0])
self.assertEqual(r2.shape[0], array2.nrows)
class CopyIndex1TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 1
class CopyIndex2TestCase(CopyIndexTestCase):
start = 0
stop = -1
step = 1
class CopyIndex3TestCase(CopyIndexTestCase):
start = 1
stop = 7
step = 1
class CopyIndex4TestCase(CopyIndexTestCase):
start = 0
stop = 6
step = 1
class CopyIndex5TestCase(CopyIndexTestCase):
start = 3
stop = 7
step = 1
class CopyIndex6TestCase(CopyIndexTestCase):
start = 3
stop = 6
step = 2
class CopyIndex7TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 10
class CopyIndex8TestCase(CopyIndexTestCase):
start = 6
    stop = -1  # Negative values mean starting from the end
step = 1
class CopyIndex9TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 1
class CopyIndex10TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 2
class CopyIndex11TestCase(CopyIndexTestCase):
start = -3
stop = -1
step = 2
class CopyIndex12TestCase(CopyIndexTestCase):
start = -1 # Should point to the last element
stop = None # None should mean the last element (including it)
step = 1
class GetItemTestCase(common.TempFileMixin, TestCase):
def test00_single(self):
"""Single element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original first element:", a[0], type(a[0]))
print("Read first element:", arr[0], type(arr[0]))
self.assertTrue(allequal(a[0], arr[0]))
self.assertEqual(type(a[0]), type(arr[0]))
def test01_single(self):
"""Single element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original first element:", a[0], type(a[0]))
print("Read first element:", arr[0], type(arr[0]))
self.assertEqual(a[0], arr[0])
self.assertEqual(type(a[0]), type(arr[0]))
def test02_range(self):
"""Range element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test03_range(self):
"""Range element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test04_range(self):
"""Range element access, strided (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test05_range(self):
"""Range element access, strided (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test06_negativeIndex(self):
"""Negative Index element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last element:", a[-1])
print("Read last element:", arr[-1])
self.assertTrue(allequal(a[-1], arr[-1]))
def test07_negativeIndex(self):
"""Negative Index element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original before last element:", a[-2])
print("Read before last element:", arr[-2])
if isinstance(a[-2], numpy.ndarray):
self.assertTrue(allequal(a[-2], arr[-2]))
else:
self.assertEqual(a[-2], arr[-2])
def test08_negativeRange(self):
"""Negative range element access (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test09_negativeRange(self):
"""Negative range element access (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen()
arr = self.h5file.root.somearray
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class GI1NATestCase(GetItemTestCase, TestCase):
title = "Rank-1 case 1"
numericalList = numpy.array([3])
numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
charList = numpy.array(["3"], 'S')
charListME = numpy.array(
["321", "221", "121", "021", "421", "521", "621"], 'S')
class GI1NAOpenTestCase(GI1NATestCase):
close = 0
class GI1NACloseTestCase(GI1NATestCase):
close = 1
class GI2NATestCase(GetItemTestCase):
# A more complex example
title = "Rank-1,2 case 2"
numericalList = numpy.array([3, 4])
numericalListME = numpy.array([[3, 2, 1, 0, 4, 5, 6],
[2, 1, 0, 4, 5, 6, 7],
[4, 3, 2, 1, 0, 4, 5],
[3, 2, 1, 0, 4, 5, 6],
[3, 2, 1, 0, 4, 5, 6]])
charList = numpy.array(["a", "b"], 'S')
charListME = numpy.array(
[["321", "221", "121", "021", "421", "521", "621"],
["21", "21", "11", "02", "42", "21", "61"],
["31", "21", "12", "21", "41", "51", "621"],
["321", "221", "121", "021",
"421", "521", "621"],
["3241", "2321", "13216",
"0621", "4421", "5421", "a621"],
["a321", "s221", "d121", "g021", "b421", "5vvv21", "6zxzxs21"]], 'S')
class GI2NAOpenTestCase(GI2NATestCase):
close = 0
class GI2NACloseTestCase(GI2NATestCase):
close = 1
class SetItemTestCase(common.TempFileMixin, TestCase):
def test00_single(self):
"""Single element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify a single element of a and arr:
a[0] = b"b"
arr[0] = b"b"
# Get and compare an element
if common.verbose:
print("Original first element:", a[0])
print("Read first element:", arr[0])
self.assertTrue(allequal(a[0], arr[0]))
def test01_single(self):
"""Single element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalList
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
a[0] = 333
arr[0] = 333
# Get and compare an element
if common.verbose:
print("Original first element:", a[0])
print("Read first element:", arr[0])
self.assertEqual(a[0], arr[0])
def test02_range(self):
"""Range element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
a[1:3] = b"xXx"
arr[1:3] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test03_range(self):
"""Range element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 3, None)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4])
print("Read elements:", arr[1:4])
self.assertTrue(allequal(a[1:4], arr[1:4]))
def test04_range(self):
"""Range element update, strided (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 4, 2)
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test05_range(self):
"""Range element update, strided (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(1, 4, 2)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original elements:", a[1:4:2])
print("Read elements:", arr[1:4:2])
self.assertTrue(allequal(a[1:4:2], arr[1:4:2]))
def test06_negativeIndex(self):
"""Negative Index element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = -1
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original last element:", a[-1])
print("Read last element:", arr[-1])
self.assertTrue(allequal(a[-1], arr[-1]))
def test07_negativeIndex(self):
"""Negative Index element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = -2
a[s] = a[s]*2 + 3
arr[s] = arr[s]*2 + 3
# Get and compare an element
if common.verbose:
print("Original before last element:", a[-2])
print("Read before last element:", arr[-2])
if isinstance(a[-2], numpy.ndarray):
self.assertTrue(allequal(a[-2], arr[-2]))
else:
self.assertEqual(a[-2], arr[-2])
def test08_negativeRange(self):
"""Negative range element update (character types)"""
# Create the array under root and name 'somearray'
a = self.charListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(-4, -1, None)
a[s] = b"xXx"
arr[s] = b"xXx"
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test09_negativeRange(self):
"""Negative range element update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of a and arr:
s = slice(-3, -1, None)
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
arr[s] = rng
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
def test10_outOfRange(self):
"""Out of range update (numerical types)"""
# Create the array under root and name 'somearray'
a = self.numericalListME
arr = self.h5file.create_array(
self.h5file.root, 'somearray', a, "Some array")
if self.close:
self._reopen('a')
arr = self.h5file.root.somearray
# Modify elements of arr that are out of range:
s = slice(1, a.shape[0]+1, None)
s2 = slice(1, 1000, None)
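        # Note (added): Python slicing clips out-of-range bounds, so a[s2]
        # selects the same elements as a[s]; the test checks that updating
        # the PyTables array through s2 behaves the same way.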
rng = numpy.arange(a[s].size)*2 + 3
rng.shape = a[s].shape
a[s] = rng
rng2 = numpy.arange(a[s2].size)*2 + 3
rng2.shape = a[s2].shape
arr[s2] = rng2
# Get and compare an element
if common.verbose:
print("Original last elements:", a[-4:-1])
print("Read last elements:", arr[-4:-1])
self.assertTrue(allequal(a[-4:-1], arr[-4:-1]))
class SI1NATestCase(SetItemTestCase, TestCase):
title = "Rank-1 case 1"
numericalList = numpy.array([3])
numericalListME = numpy.array([3, 2, 1, 0, 4, 5, 6])
charList = numpy.array(["3"], 'S')
charListME = numpy.array(
["321", "221", "121", "021", "421", "521", "621"], 'S')
class SI1NAOpenTestCase(SI1NATestCase):
close = 0
class SI1NACloseTestCase(SI1NATestCase):
close = 1
class SI2NATestCase(SetItemTestCase):
# A more complex example
title = "Rank-1,2 case 2"
    numericalList = numpy.array([3, 4])
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script for running experiments.
Example to run locally:
python experiments.py --output_dir=may19_3d --hidden_state_dim=3 \
--min_seq_len=100 --max_seq_len=2000 --num_sampled_seq_len=20 \
--num_systems=100 --num_repeat=100 \
--cluster_center_dist_lower_bound=0.1 --hide_inputs=true
The outputs will show up in output_dir may19_3d.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import os
# pylint: disable=g-bad-import-order
from absl import app
from absl import flags
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
import six
import sklearn
import tqdm
import arma
import clustering
import lds
sns.set(style='whitegrid')
FLAGS = flags.FLAGS
# Flags for IO and plotting.
flags.DEFINE_string('output_dir', None, 'Output filepath.')
flags.DEFINE_boolean(
'load_results', False, 'Whether to skip experiments '
'and only plot existing results from output_dir.')
flags.DEFINE_boolean(
'plot_clusters', False, 'Whether to visualize each '
'experiment run and plot clustering results.')
# Flags for generating simulated clusters of LDSs.
flags.DEFINE_boolean('generate_diagonalizable_only', False, 'Whether to only '
'generate diagonalizable LDSs.')
flags.DEFINE_integer('num_clusters', 2, 'Number of clusters in experiments.')
flags.DEFINE_integer('num_systems', 100,
'Number of dynamical systems to cluster.')
flags.DEFINE_integer('hidden_state_dim', 2, 'Hidden state dim in experiments.')
flags.DEFINE_integer('input_dim', 1, 'Input dim in experiments.')
flags.DEFINE_boolean(
'hide_inputs', True, 'Whether the inputs are observable '
'to the clustering algorithm.')
flags.DEFINE_spaceseplist(
'cluster_center_eigvalues', None, 'Optional List of lists of eigenvalues '
'for each cluster. The outer list is space separated, and the inner list '
'is comma separated. E.g. `0.9,0.1 0.5,0.1`. When null, generate random '
'clusters centers by drawing eigenvalues uniformly from [-1, 1].')
flags.DEFINE_float(
'cluster_center_dist_lower_bound', 0.2, 'Desired distance lower bound '
'between cluster centers. Only applicable when cluster_center_eigvalues '
'is None. Generate cluster centers until distance >= this val.')
flags.DEFINE_float('cluster_radius', 0.05,
'Radius of each dynamical system cluster.')
flags.DEFINE_integer('random_seed', 0, 'Random seed.')
flags.DEFINE_integer('num_repeat', 1,
'Number of repeated runs for each fixed seq len.')
# Flags for output sequences from LDSs.
flags.DEFINE_integer('min_seq_len', 10, 'Min seq len in experiments.')
flags.DEFINE_integer('max_seq_len', 1000, 'Max seq len in experiments.')
flags.DEFINE_integer(
'num_sampled_seq_len', 10, 'Number of sampled seq len '
'values in between min and max seq len.')
flags.DEFINE_float('input_mean', 0.0, 'Input mean.')
flags.DEFINE_float('input_stddev', 1.0, 'Input stddev.')
flags.DEFINE_float('output_noise_stddev', 0.01, 'Output noise stddev.')
flags.DEFINE_float('init_state_mean', 0.0, 'Init state mean.')
flags.DEFINE_float('init_state_stddev', 0.0, 'Init state stddev.')
# Flags for hparams in clustering algorithms.
flags.DEFINE_integer('guessed_hidden_dim', 0,
'Assumed hidden dim. If 0, use true hidden dim.')
flags.DEFINE_integer(
'guessed_num_clusters', 0,
'Desired number of clusters. If 0, find best number '
'adaptively from maximizing kmeans objective score.')
flags.DEFINE_integer(
'LDS_GIBBS_num_update_samples', 100, 'Number of update '
'samples used for fitting LDS in pylds package.')
flags.DEFINE_integer('spectral_filtering_num_filters', 25, 'Number of filters '
'used in spectral filtering method.')
flags.DEFINE_float('spectral_filtering_learning_rate', 0.0001, 'Learning rate '
'in spectral filtering method.')
# Flags for whether to include certain baselines.
flags.DEFINE_boolean(
'include_LDS_MLE', False, 'Whether to include MLE '
'estimation for LDS in the experiments. Could be slow.')
flags.DEFINE_boolean(
'include_tslearn', True, 'Whether to include time series '
'clustering methods from the tslearn package in the '
'experiments.')
flags.DEFINE_boolean(
'include_tslearn_slow', False, 'Whether to include time '
'series clustering methods from the tslearn package '
'that are slow: DTW and GAK.')
flags.DEFINE_boolean('include_LDS_GIBBS', True, 'Whether to include the '
'Gibbs sampling method for LDS.')
flags.DEFINE_boolean('include_ARMA_MLE', False, 'Whether to include the '
'MLE method for ARMA.')
def create_model_fns(hdim):
"""Util function to create model fns to fit model params to sequences.
Args:
hdim: Guessed hidden dimension for model fitting.
Returns:
A dictionary mapping method names to model_fns. Each model_fn
takes output seq and input seq, and returns fitted model params.
"""
model_fns = collections.OrderedDict()
# Using raw outputs.
# model_fns['raw_output'] = lambda o, i: o
# pylint: disable=g-long-lambda
# Pure AR.
model_fns['AR'] = lambda o, i: arma.fit_ar(o, i, hdim)
# Iterated regression without regularization and with regularization.
model_fns['ARMA_OLS'] = lambda o, i: arma.fit_arma_iter(o, i, hdim)
model_fns['ARMA_RLS'] = lambda o, i: arma.fit_arma_iter(
o, i, hdim, l2_reg=0.01)
# Fit AR model and cluster based on AR param roots.
# model_fns['AR_roots'] = lambda o, i: arma.get_eig_from_arparams(
# arma.fit_ar(o, i, hdim))
# Fit ARMA model and cluster based on AR param roots.
# model_fns['ARMA_OLS_roots'] = lambda o, i: arma.get_eig_from_arparams(
# arma.fit_arma_iter(o, i, hdim))
# model_fns['ARMA_RLS_roots_0.01'] = lambda o, i: arma.get_eig_from_arparams(
# arma.fit_arma_iter(o, i, hdim, l2_reg=0.01))
if FLAGS.include_LDS_GIBBS:
model_fns['LDS_GIBBS'] = lambda o, i: lds.fit_lds_gibbs(
o, i, hdim, num_update_samples=FLAGS.LDS_GIBBS_num_update_samples)
if FLAGS.include_ARMA_MLE:
model_fns['ARMA_MLE'] = lambda o, i: arma.fit_arma_mle(o, i, hdim)
if FLAGS.include_LDS_MLE:
model_fns['LDS_MLE'] = lambda o, i: lds.fit_lds_mle(o, i, hdim)
return model_fns
def _compose_model_fn(model_fn):
if FLAGS.hide_inputs:
return lambda seq: model_fn(seq.outputs, None)
return lambda seq: model_fn(seq.outputs, seq.inputs)
def _create_pca_model_fn(pca_model):
return lambda o, _: pca_model.transform(o.flatten()).flatten()
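def _demo_create_model_fns():
  """Hedged usage sketch (not part of the original file).

  Fits every model returned by create_model_fns to one synthetic output
  sequence with inputs hidden, mirroring the FLAGS.hide_inputs code path.
  Assumes FLAGS have been parsed and that the arma.* signatures match the
  lambdas above.
  """
  outputs = np.random.randn(500, 1)
  model_fns = create_model_fns(hdim=2)
  return {name: fn(outputs, None) for name, fn in model_fns.items()}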
# pylint: disable=g-doc-args
def get_results(cluster_center_eigvalues,
cluster_center_dist_lower_bound,
hidden_state_dim,
input_dim,
guessed_hidden_dim,
num_clusters,
guessed_num_clusters,
min_seq_len,
max_seq_len,
num_sampled_seq_len,
num_repeat,
num_systems,
cluster_radius,
input_mean,
input_stddev,
output_noise_stddev,
init_state_mean=0.0,
init_state_stddev=0.0,
generate_diagonalizable_only=False,
random_seed=0,
results_path=None):
"""Get results for varying sequence lengths.
Args:
cluster_center_eigvalues: List of lists of eigenvalues for each cluster.
      E.g. [[0.9,0.1], [0.5,0.1], [0.2,0.2]], or None. If None, eigenvalues will
be generated from uniform(-1,1) with respect to
cluster_center_dist_lower_bound.
cluster_center_dist_lower_bound: Desired distance lower bound between
clusters. When generating cluster centers, try repeatedly until distance
is greater than cluster_center_dist_lower_bound.
hidden_state_dim: True hidden state dim.
input_dim: The input dim.
guessed_hidden_dim: Assumed hidden dim. If 0, use true hidden dim.
num_clusters: True number of clusters.
guessed_num_clusters: Desired number of clusters. If 0, use true number.
min_seq_len: Min seq len in experiments.
max_seq_len: Max seq len in experiments.
num_sampled_seq_len: Number of sampled seq len values in between min and max
seq len.
num_repeat: Number of repeated experiments for each seq_len.
    num_systems: Number of dynamical systems in each clustering experiment.
cluster_radius: Expected distance of generated systems from cluster centers.
input_mean: Scalar or 1D array of length hidden state dim.
    input_stddev: Scalar or 1D array of length hidden state dim.
output_noise_stddev: Scalar.
init_state_mean: Scalar or 1D array of length hidden state dim.
    init_state_stddev: Scalar or 1D array of length hidden state dim.
random_seed: Random seed, integer.
Returns:
A pandas DataFrame with columns `method`, `seq_len`, `t_secs`,
`failed_ratio`, and columns for clustering metrics such as `adj_mutual_info`
and `v_measure`. The same method and seq_len will appear in num_repeat many
rows.
"""
if cluster_center_eigvalues is not None:
if len(cluster_center_eigvalues) <= 1:
raise ValueError('Need at least two cluster centers.')
cluster_center_eigvalues = np.array(cluster_center_eigvalues)
if cluster_center_eigvalues.shape != (num_clusters, hidden_state_dim):
raise ValueError(
          'Cluster center eig has shape %s, expected (%d, %d).' %
(str(cluster_center_eigvalues.shape), num_clusters, hidden_state_dim))
| np.random.seed(random_seed) | numpy.random.seed |
import numpy as np
from pyfmmlib2d import FMM
from pyfmmlib2d.periodized.real_laplace import periodized_laplace_fmm
from pyfmmlib2d.utilities.random import float_random, complex_random
import matplotlib as mpl
import matplotlib.pyplot as plt
from pyfmmlib2d import RFMM
import time
plt.ion()
################################################################################
# Periodic Laplace FMM
# tested against FFT based grid solve; compared outside of support of forces
# the grid has been spaced so a grid node comes within ~10^{-12} of a check pt
# should work in all corners and sides
n_grid = 150
# get uniform grid on [0, 2π]
grid_v, grid_h = np.linspace(0, 2*np.pi, n_grid, endpoint=False, retstep=True)
grid_xv = grid_v + 3.381898123243e-02
grid_yv = grid_v + 1e-12
grid_x, grid_y = np.meshgrid(grid_xv, grid_yv, indexing='ij')
grid = np.row_stack([grid_x.ravel(), grid_y.ravel()])
k = np.fft.fftfreq(n_grid, grid_h/(2*np.pi))
kx, ky = np.meshgrid(k, k, indexing='ij')
k2 = kx*kx + ky*ky
k2[0,0] = np.Inf
ilap = -1.0/k2
# put a pair of +/- Gaussian test pulses at several locations (corners, edges, center)
k = 30
for center_x in (0.0, np.pi, 2*np.pi):
for center_y in (0.0, np.pi, 2*np.pi):
print('\nCenter x: {:0.2f}'.format(center_x))
print( 'Center y: {:0.2f}'.format(center_y))
def get_shift(xs, ys):
lax = center_x+0.2 + xs
lay = center_y+0.2 + ys
lbx = center_x-0.2 + xs
lby = center_y-0.2 + ys
d2a = (grid_x-lax)**2 + (grid_y-lay)**2
d2b = (grid_x-lbx)**2 + (grid_y-lby)**2
fa = np.exp(-k*d2a)
fb = -np.exp(-k*d2b)
return fa + fb
f = np.zeros_like(grid_x)
shift_vec = [-2*np.pi, 0.0, 2*np.pi]
for xs in shift_vec:
for ys in shift_vec:
f += get_shift(xs, ys)
# solve Poisson problem on grid
ua = np.fft.ifft2(ilap*np.fft.fft2(f)).real
# get charge
qw = grid_h**2
charge = f.ravel() * qw / (2*np.pi)
# evaluate FMM
st1 = time.time()
pfmm = periodized_laplace_fmm(p=16, N=4)
st2 = time.time()
out = pfmm(grid, charge=charge, compute_source_potential=True)
periodized_time_with_setup = time.time() - st1
periodized_time_without_setup = time.time() - st2
ue = out['self']['u'].reshape([n_grid, n_grid])
# compare to raw FMM speed
st = time.time()
_ = RFMM(grid, grid, charge=charge, compute_source_potential=True)
raw_time = time.time() - st
        # now mask out everything near the support of f; compare only far away
r = 2
bad = np.zeros(grid_x.shape, dtype=bool)
for xs in shift_vec:
for ys in shift_vec:
d2 = (grid_x-center_x-xs)**2 + (grid_y-center_y-ys)**2
bad = np.logical_or(bad, d2<r)
good = ~bad
# get error with adjusting mean
ue -= np.mean(ue)
error = np.abs(ue-ua)/np.abs(ua[good]).max()
me = | np.ma.array(error, mask=bad) | numpy.ma.array |
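# Illustrative continuation (an assumption, not part of the original script):
# report the masked max error and the periodization overhead.
#   print('max error away from support: {:0.2e}'.format(me.max()))
#   print('periodized/raw time ratio: {:0.2f}'.format(
#       periodized_time_without_setup/raw_time))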
import numpy as np
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip install llvmlite==0.31.0
# module for fitting resonance curves of kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on the fit_nonlinear_iq_with_err probably needs some work
# add in step by step fitting i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have a fit option that just specifies tau because that never really changes for your cryostat
#Change log
#JDW 2017-08-17 added in a keyword/function to allow for gain variation "amp_var" to be taken out before fitting
#JDW 2017-08-30 added in fitting for magnitude fitting of resonators i.e. not in iq space
#JDW 2018-03-05 added more clever function for guessing x0 for fits
#JDW 2018-08-23 added more clever guessing for resonators with large phi into separate guess functions
J=np.exp(2j*np.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
    '''
    fast analytical root finding (Cardano's method); with numba it looks like a 10x speed up
    returns only the largest real root
    '''
u=np.empty(2,np.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=np.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=np.abs(w+p/3)
w1=np.abs(w*J+p/3)
w2=np.abs(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
where_real = np.where(np.abs(np.imag(roots)) < 1e-15)
#if len(where_real)>1: print(len(where_real))
#print(D)
if D>0: return np.max(np.real(roots)) # three real roots
else: return np.real(roots[np.argsort(np.abs(np.imag(roots)))][0]) #one real root get the value that has smallest imaginary component
#return np.max(np.real(roots[where_real]))
#return np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
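# Sanity check for cardan (illustrative, not part of the original module):
# x^3 - 6x^2 + 11x - 6 = (x - 1)(x - 2)(x - 3) has three real roots, so the
# largest real root should be returned:
#   assert np.isclose(cardan(1.0, -6.0, 11.0, -6.0), 3.0)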
# function to descript the magnitude S21 of a non linear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
    '''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # b0 DC level of s21 away from resonator
    # b1 frequency dependent gain variation
    # flin is probably the frequency of the resonator when a = 0
    #
    # This is based off of fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms inside the absolute value and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # |S21|^2 = (b0 + b1*xlin) * | 1 - amp*e^(j phi)/(1 + 2jy) + (amp/2)*(e^(j phi) - 1) |^2
    #
    # where the nonlinearity of y is described by the following equation taken from
    # "Response of superconducting microresonators with nonlinear kinetic inductance"
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    '''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#where_real = np.where(np.abs(np.imag(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#np.max(np.real(roots[where_real]))
z = (b0 +b1*xlin)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
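# Illustrative usage sketch (all parameter values below are assumptions, not
# defaults from this module): evaluate |S21|^2 for a 100 MHz resonator.
#   freqs = np.linspace(99.95e6, 100.05e6, 1001)
#   s21_mag2 = nonlinear_mag(freqs, 100e6, 10000., 0.5, 0., 0.5, 1., 0., 100e6)
#   # the dip sits near fr and is pulled asymmetric when a > 0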
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
    '''
    # simpler version for quicker fitting when applicable
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # b0 DC level of s21 away from resonator
    #
    # This is based off of fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms inside the absolute value and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # |S21|^2 = b0 * | 1 - amp*e^(j phi)/(1 + 2j*xg*Qr) + (amp/2)*(e^(j phi) - 1) |^2
    #
    # no y just xg
    # with no nonlinear kinetic inductance
    '''
if not np.isscalar(fr): #vectorize
x = np.reshape(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
    '''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # i0
    # q0 these are constants that describe an overall phase rotation of the iq loop + a DC gain offset
    # tau cable delay
    # f0 is also the center frequency; not sure why we include this as a secondary parameter, it should be the same as fr
    #
    # This is based off of fitting code from MUSIC
    #
    # The idea is we are producing a model that is described by the equation below
    # the first two terms inside the parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from though it does seem to be important for fitting
    #
    # S21 = (i0 + j*q0) * e^(-2 pi j deltaf tau) * ( 1 - amp*e^(j phi)/(1 + 2jy) + (amp/2)*(e^(j phi) - 1) )
    #
    # where the nonlinearity of y is described by the following equation taken from
    # "Response of superconducting microresonators with nonlinear kinetic inductance"
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    '''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
return z
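# Illustrative usage sketch (parameter values are assumptions): generate a
# synthetic IQ sweep from the forward model, e.g. to exercise the fitters below.
#   freqs = np.linspace(99.95e6, 100.05e6, 1001)
#   z = nonlinear_iq(freqs, 100e6, 10000., 0.5, 0.2, 0.3, 1., 0., 3e-7, 100e6)
#   # z is complex; np.real(z) and np.imag(z) trace out the IQ loop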
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
    '''
    when using a fitter that can't handle complex numbers
    one needs to return the real and imaginary components separately
    '''
    if ('tau' in keywords):
        use_given_tau = True
        tau = keywords['tau']
    else:
        use_given_tau = False
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
real_z = np.real(z)
imag_z = np.imag(z)
return np.hstack((real_z,imag_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
    '''
    x frequencies Hz
    z complex or abs of s21
    ranges is the ranges for each parameter i.e. np.asarray(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
    n_grid_points how finely to sample each parameter space.
    this can be very slow for n_grid_points > 10;
    an increase by a factor of 2 will take 2**5 times longer
    to marginalize, you must minimize over the unwanted axes of sum_dev
    i.e. for fr np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
    '''
if error is None:
error = np.ones(len(x))
fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
min_index = np.where(sum_dev == np.min(sum_dev))
index1 = min_index[0][0]
index2 = min_index[1][0]
index3 = min_index[2][0]
index4 = min_index[3][0]
index5 = min_index[4][0]
fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = np.zeros((5,n_grid_points))
marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
if plot:
        levels = [2.3,4.61] #delta chi squared for two parameters, 68% and 90% confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
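# Illustrative usage sketch (range values are assumptions): build the ranges
# array described in the docstring and run a coarse grid search.
#   ranges = np.asarray(([99.9e6, 1000., 0.01, -np.pi, 0.5],
#                        [100.1e6, 50000., 1.00, np.pi, 2.0]))
#   fit = brute_force_linear_mag_fit(freqs, z, ranges, n_grid_points=10)
#   print(fit['fit_values_names'], fit['fit_values'])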
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
    '''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    # tau --- forces tau to a specific value
    # tau_guess --- fixes the guess for tau without having to specify all of x0
    '''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
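# Illustrative usage sketch: fit a measured sweep, assuming `freqs` (Hz) and
# complex `z` come from an IQ sweep of a single resonator.
#   fit_dict = fit_nonlinear_iq(freqs, z)
#   fr, Qr, amp, phi, a, i0, q0, tau, f0 = fit_dict['fit'][0]
#   plt.plot(np.real(z), np.imag(z), '.')
#   plt.plot(np.real(fit_dict['fit_result']), np.imag(fit_dict['fit_result']))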
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
    '''
    # same as the above function but takes fine and gain scans separately
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    '''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_err:
z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
#only do it for fine data
red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
# same function but double fits so that it can get error and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
    '''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    '''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_stacked = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
err = np.ones(z_stacked.shape[0])*np.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
    '''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    '''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.abs(z[0])**2,np.abs(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
    '''
    # same as above but fine and gain scans are provided separately
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude. useful when the transfer function of the cryostat is not flat
    '''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#stack the scans for curvefit
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
        z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) #propagation of errors; cross term left out
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
def amplitude_normalization(x,z):
    '''
    # normalize the amplitude variation; requires a gain scan
    # flag frequencies to use in amplitude normalization
    '''
index_use = np.where(np.abs(x-np.median(x))>100000) #100kHz away from resonator
poly = np.polyfit(x[index_use],np.abs(z[index_use]),2)
poly_func = np.poly1d(poly)
normalized_data = z/poly_func(x)*np.median(np.abs(z[index_use]))
return normalized_data
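# Illustrative usage sketch (assumes the sweep extends well past the
# resonator, so points more than 100 kHz from the median frequency can anchor
# the polynomial gain fit):
#   z_norm = amplitude_normalization(freqs, z)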
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
    '''
    # normalize the amplitude variation; requires a gain scan
    # uses the gain scan to normalize; does not use the fine scan
    # flag frequencies to use in amplitude normalization
    '''
index_use = np.where(np.abs(gain_x-np.median(gain_x))>100000) #100kHz away from resonator
poly = np.polyfit(gain_x[index_use],np.abs(gain_z[index_use]),2)
poly_func = np.poly1d(poly)
poly_data = poly_func(gain_x)
normalized_gain = gain_z/poly_data*np.median(np.abs(gain_z[index_use]))
normalized_fine = fine_z/poly_func(fine_x)*np.median(np.abs(gain_z[index_use]))
normalized_stream = stream_z/poly_func(stream_x)*np.median(np.abs(gain_z[index_use]))
amp_norm_dict = {'normalized_gain':normalized_gain,
'normalized_fine':normalized_fine,
'normalized_stream':normalized_stream,
'poly_data':poly_data}
return amp_norm_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
    '''
    # this is less robust than guess_x0_iq_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
    '''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 #polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    #might be able to guess this from the ratio of the min and max distances between iq points in the fine sweep
a_guess = 0
#i0 and iq guess
    if np.max(np.abs(fine_z))==np.max(np.abs(z)): #if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    #cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
def guess_x0_mag_nonlinear(x,z,verbose = False):
    '''
    # this is less robust than guess_x0_mag_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
    '''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 #polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    #might be able to guess this from the ratio of the min and max distances between iq points in the fine sweep
a_guess = 0
#b0 and b1 guess
if len(gain_z)>1:
xlin = (gain_x - fr_guess)/fr_guess
b1_guess = (np.abs(gain_z)[-1]**2-np.abs(gain_z)[0]**2)/(xlin[-1]-xlin[0])
else:
xlin = (fine_x - fr_guess)/fr_guess
b1_guess = (np.abs(fine_z)[-1]**2-np.abs(fine_z)[0]**2)/(xlin[-1]-xlin[0])
b0_guess = np.median(np.abs(gain_z)**2)
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("phi guess = %.2f" %phi_guess)
print("b0 guess = %.2f" %b0_guess)
print("b1 guess = %.2f" %b1_guess)
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
return x0
def guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
    '''
    # this is the same as guess_x0_iq_nonlinear except that it
    # takes the fine scan and the gain scan as separate variables
    # this runs into fewer issues when trying to sort out what part of
    # the data is fine and what part is gain for the guessing
    # make sure data is sorted from low to high frequency
    '''
#gain phase
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index == 0:
fr_guess_index = len(fine_x)//2
elif fr_guess_index == (len(fine_x)-1):
fr_guess_index = len(fine_x)//2
fr_guess = fine_x[fr_guess_index]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index:-1]
left = half_distance[0:fr_guess_index]
right_index = np.argmin(np.abs(right))+fr_guess_index
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 #polynomial fit to amp versus depth
#guess impedance rotation phi
#phi_guess = 0
#guess impedance rotation phi
#fit a circle to the iq loop
xc, yc, R, residu = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
    #compute the angle between (off_res_i,off_res_q)->(0,0) and (off_res_i,off_res_q)->(xc,yc) of the fitted circle
off_res_i,off_res_q = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.,(np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
x1, y1, = -off_res_i,-off_res_q
x2, y2 = xc-off_res_i,yc-off_res_q
dot = x1*x2 + y1*y2 # dot product
det = x1*y2 - y1*x2 # determinant
angle = np.arctan2(det, dot)
phi_guess = angle
    # if phi is large, it is better to re-guess f0
    # f0 should be the farthest from the off res point
if (np.abs(phi_guess)>0.3):
dist1 = np.sqrt((np.real(fine_z[0])-np.real(fine_z))**2+(np.imag(fine_z[0])-np.imag(fine_z))**2)
dist2 = np.sqrt((np.real(fine_z[-1])-np.real(fine_z))**2+(np.imag(fine_z[-1])-np.imag(fine_z))**2)
fr_guess_index = np.argmax((dist1+dist2))
fr_guess = fine_x[fr_guess_index]
        #also fix the Q guess
fine_z_derot = (fine_z-(off_res_i+1.j*off_res_q))*np.exp(1j*(-phi_guess))+(off_res_i+1.j*off_res_q)
#fr_guess_index = np.argmin(np.abs(fine_z_derot))
#fr_guess = fine_x[fr_guess_index]
mag_max = np.max(np.abs(fine_z_derot)**2)
mag_min = np.min(np.abs(fine_z_derot)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z_derot)**2-mag_3dB
right = half_distance[np.argmin(np.abs(fine_z_derot)):-1]
left = half_distance[0:np.argmin(np.abs(fine_z_derot))]
right_index = np.argmin(np.abs(right))+np.argmin(np.abs(fine_z_derot))
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#also fix amp guess
d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z_derot)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
#guess non-linearity parameter
    #might be able to guess this from the ratio of the min and max distances between iq points in the fine sweep
a_guess = 0
#i0 and iq guess
    if np.max(np.abs(fine_z))>np.max(np.abs(gain_z)): #if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    #cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
if verbose == True:
print("fr guess = %.3f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("phi guess = %.2f" %phi_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
def guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
    '''
    # this is the same as guess_x0_mag_nonlinear except that it
    # takes the fine scan and the gain scan as separate variables
    # this runs into fewer issues when trying to sort out what part of
    # the data is fine and what part is gain for the guessing
    # make sure data is sorted from low to high frequency
    '''
#phase of gain
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(fine_z))
#protect against guessing the first or last data points
if fr_guess_index == 0:
fr_guess_index = len(fine_x)//2
elif fr_guess_index == (len(fine_x)-1):
fr_guess_index = len(fine_x)//2
fr_guess = fine_x[fr_guess_index]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index:-1]
left = half_distance[0:fr_guess_index]
right_index = np.argmin(np.abs(right))+fr_guess_index
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4
    #polynomial fit to amp versus depth calculated empirically
#guess impedance rotation phi
#fit a circle to the iq loop
xc, yc, R, residu = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
    #compute the angle between (off_res_i,off_res_q)->(0,0) and (off_res_i,off_res_q)->(xc,yc) of the fitted circle
off_res_i,off_res_q = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.,(np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
x1, y1, = -off_res_i,-off_res_q
x2, y2 = xc-off_res_i,yc-off_res_q
dot = x1*x2 + y1*y2 # dot product
det = x1*y2 - y1*x2 # determinant
angle = | np.arctan2(det, dot) | numpy.arctan2 |
import sys, traceback
import numpy as np
import torch
from dynamical_system.modulation import Modulator
from imitation_learning.model import BinClassifier
from rapidly_exploring_random_tree.rrt import RRT
class Controller():
'''
This is an abstract super-class for all controllers. At the very least,
get_trajectory() and log_prior() need to be implemented.
'''
def __init__(self):
pass
def get_trajectory(self, env, kernel):
'''
given an environment (i.e. a specific task instantiation), roll out the
controller with randomness specified in the kernel and return the
trajectory of shape T x 2.
'''
raise NotImplementedError
def log_prior(self, kernel):
'''
return the prior probability of the controller kernel, for stochastic controllers.
return 0 for deterministic controllers.
'''
raise NotImplementedError
class DSController(Controller):
def __init__(self):
super().__init__()
self.modulator = Modulator()
self.warned = False
def get_trajectory(self, env, kernel=None):
assert env.oob_termination is False, 'early termination needs to be disabled'
assert env.time_limit >= 500, 'time limit should be greater than or equal to 500'
if env.enable_lidar is True and not self.warned:
print('WARNING: Not turning off lidar on env could be significantly slower')
self.warned = True
try:
self.modulator.set_arena(env.arena)
epsilon = sys.float_info.epsilon
done = False
s = env.s()[:2]
traj = [s]
while not done:
d = self.modulator.modulate(s)
d = d / max([0.3, d[0] + epsilon, d[1] + epsilon])
s, _, done, _ = env.step(d)
s = s[:2]
traj.append(s)
traj = np.array(traj)
return traj
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
return None
def log_prior(self, kernel):
return 0
class ILController(Controller):
def __init__(self, model_fn='imitation_learning/data_and_model/best.pt', device=None):
super().__init__()
if device is None:
device = ['cpu', 'cuda'][torch.cuda.is_available()]
model_data = torch.load(model_fn)
self.MEAN = model_data['MEAN']
self.STD = model_data['STD']
self.N_BINS = model_data['N_BINS']
self.policy = BinClassifier(18, self.N_BINS)
model_data = torch.load(model_fn, map_location='cpu')
self.policy.load_state_dict(model_data['model'])
self.BIN_RES = 2 * np.pi / self.N_BINS
self.BIN_LOW = - np.pi
self.device = device
self.policy.to(self.device)
def get_trajectory(self, env, kernel=None):
try:
traj = []
with torch.no_grad():
done = False
s = env.s()
traj.append(s[:2])
while not done:
s_scaled = torch.tensor((s - self.MEAN) / self.STD).float().to(self.device)
bin_idx = self.policy(s_scaled).cpu().numpy().argmax()
angle = bin_idx * self.BIN_RES - self.BIN_LOW
dx = | np.cos(angle) | numpy.cos |
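# Illustrative usage sketch (the env construction is an assumption): both
# controllers share the same rollout interface.
#   controller = DSController()
#   traj = controller.get_trajectory(env)  # T x 2 array of x,y positions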
import numpy as np
import netCDF4
import torch
from torch.utils.data import Dataset
class iciData(Dataset):
"""
Pytorch dataset for the ICI training data.
"""
def __init__(self, path, inChannels, target, T_rec = None, batch_size = None):
"""
Create instance of the dataset from a given file path.
Args:
            path: Path to the NetCDF4 file containing the data.
batch_size: If positive, data is provided in batches of this size
"""
super().__init__()
self.batch_size = batch_size
self.file = netCDF4.Dataset(path, mode = "r")
TB = self.file.variables["TB"][:]
channels = self.file.variables["channels"][:]
self.surface = self.file.variables["cases"][:]
self.channels = inChannels
idx = []
for i in range(len(inChannels)):
idx.append(np.argwhere(channels == inChannels[i])[0][0])
self.index = idx
self.itarget = np.argwhere(np.array(channels) == target)[0]
C = []
for i in range(len(inChannels)):
ic = self.index[i]
C.append(TB[1, :, ic])
self.x = np.float32(np.stack(C, axis = 1))
#store mean and std to normalise data
x_noise = self.add_noise(self.x, self.index)
self.std = np.std(x_noise, axis = 0)
self.mean = np.mean(x_noise, axis = 0)
self.y = np.float32(TB[0, :, self.itarget[0]])
self.y_noise = self.add_noise(self.y, self.itarget)
self.x = self.x.data
self.y = self.y.data
self.y_noise = self.y_noise.data
def __len__(self):
"""
The number of entries in the training data. This is part of the
pytorch interface for datasets.
Return:
int: The number of samples in the data set
"""
if self.batch_size is None:
return self.x.shape[0]
else:
return int(np.ceil(self.x.shape[0] / self.batch_size))
def __getitem__(self, i):
"""
Return element from the dataset. This is part of the
pytorch interface for datasets.
Args:
i: The index of the sample to return
"""
if (i == 0):
indices = np.random.permutation(self.x.shape[0])
self.x = self.x[indices, :]
self.y = self.y[indices]
if self.batch_size is None:
return (torch.tensor(self.x[[i], :]),
torch.tensor(self.y[[i]]))
else:
i_start = self.batch_size * i
i_end = self.batch_size * (i + 1)
x = self.x[i_start : i_end, :].copy()
x_noise = np.float32(self.add_noise(x, self.index))
x_norm = np.float32(self.normalise(x_noise))
return (torch.tensor(x_norm),
torch.tensor(self.y[i_start : i_end]))
def add_noise(self, x, index):
"""
        Gaussian noise is added to every measurement before it is
        used for training.
Args:
the input TB in one batch of size (batch_size x number of channels)
Returns:
input TB with noise
"""
nedt = np.array([0.8, 0.8, 0.8, #183Ghz
0.7, 0.7, #243Ghz
1.2, 1.3, 1.5, #325Ghz
1.4, 1.6, 2.0, #448Ghz
1.6, 1.6]) #664Ghz
nedt_subset = nedt[index]
size_TB = int(x.size/len(nedt_subset))
x_noise = x.copy()
if len(index) > 1:
for ic in range(len(self.index)):
noise = np.random.normal(0, nedt_subset[ic], size_TB)
x_noise[:, ic] += noise
else:
noise = | np.random.normal(0, nedt_subset, size_TB) | numpy.random.normal |
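# Illustrative usage sketch (file name and channel ids are assumptions, not
# from the original module):
#   data = iciData('TB_ICI_train.nc', inChannels=np.array([1, 2, 3, 4]),
#                  target=1, batch_size=256)
#   x_batch, y_batch = data[0]  # normalised, noise-injected TBs and targets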
""" @file setup.py
"""
import numpy as np
import copy
# DOT_assignment
from DOT_assignment import assignments
from DOT_assignment import controls
from DOT_assignment import dynamics
from DOT_assignment import engine
from DOT_assignment import linear_models_2D
from DOT_assignment import linear_models_3D
from DOT_assignment import run
from DOT_assignment import distributions
def setup_simulation(sim_profile):
""" Returns dictionary of controls, dynamics, decision-making policy, and initial state parameters
Input: Standard python dict containing descriptors outlining simulation requirements
Output: Standard python dict containing controls, dynamics, assignment, etc. data structures
"""
x0 = None
stationary_states = None
agent_model = sim_profile["agent_model"]
agent_control_policy = sim_profile["agent_control_policy"]
agent_formation = sim_profile["agent_formation"]
target_formation = sim_profile["target_formation"]
assignment_policy = sim_profile["assignment_policy"]
assignment_epoch = sim_profile["assignment_epoch"]
nagents = sim_profile["nagents"]
ntargets = sim_profile["ntargets"]
collisions = sim_profile["collisions"]
collision_tol = sim_profile["collision_tol"]
dim = sim_profile["dim"]
dt = sim_profile["dt"]
maxtime = sim_profile["maxtime"]
initial_conditions = sim_profile['initial_conditions']
if initial_conditions == None:
initial_formation_params = {
'nagents': nagents, 'agent_model': agent_model, 'agent_swarm_formation': agent_formation,
'ntargets': ntargets, 'target_swarm_formation': target_formation
}
initial_conditions = generate_initial_conditions(dim, initial_formation_params)
        x0 = initial_conditions[0]
        targets = initial_conditions[1]
else:
x0 = initial_conditions[0]
targets = initial_conditions[1]
sim = {}
parameters = ['agent_model', 'dx', 'du', 'A', 'B', 'agent_dyn', 'agent_pol', 'asst_pol', 'x0']
sim.fromkeys(parameters)
##### Dynamic Model #####
if dim == 2:
if agent_model == "Double_Integrator":
A, B, C, D, dx, du, statespace = linear_models_2D.double_integrator_2D()
### runner
sim_runner = run.run_identical_doubleint_2D
if agent_model == "Linearized_Quadcopter":
A, B, C, D, dx, du, statespace = linear_models_2D.quadcopter_2D()
### runner
sim_runner = run.run_identical_linearized_quadcopter_2D
if dim == 3:
if agent_model == "Double_Integrator":
A, B, C, D, dx, du, statespace = linear_models_3D.double_integrator_3D()
### runner
sim_runner = run.run_identical_doubleint_3D
if agent_model == "Linearized_Quadcopter":
A, B, C, D, dx, du, statespace = linear_models_3D.quadcopter_3D()
### runner
sim_runner = run.run_identical_linearized_quadcopter_3D
Q = np.eye(dx)
R = np.eye(du)
# TODO - remove
# DEBUG control terms
Q2 = None
Q3 = None
######################
if dim == 2:
if agent_model == 'Double_Integrator':
Q2 = copy.deepcopy(Q)
Q2[2,2] = 0.0
Q2[3,3] = 0.0
Q3 = copy.deepcopy(Q)
Q3[0, 0] = 100
Q3[1, 1] = 100
Q3[2,2] = 0.0
Q3[3,3] = 0.0
if agent_model == 'Linearized_Quadcopter':
Q3 = copy.deepcopy(Q)
Q3[0, 0] = 100
Q3[1, 1] = 100
Q3[2,2] = 100
Q3[3,3] = 100
Q3[4,4] = 0.0
Q3[5,5] = 0.0
Q3[6, 6] = 0.0
Q3[7, 7] = 0.0
if dim == 3:
if agent_model == 'Double_Integrator':
Q2 = copy.deepcopy(Q)
Q2[3,3] = 0.0
Q2[4,4] = 0.0
Q2[5,5] = 0.0
Q3 = copy.deepcopy(Q)
Q3[0, 0] = 1000
Q3[1, 1] = 1000
Q3[2, 2] = 1000
Q3[3,3] = 0.0
Q3[4,4] = 0.0
Q3[5,5] = 0.0
if agent_model == 'Linearized_Quadcopter':
Q3 = copy.deepcopy(Q)
Q3[0, 0] = 1000
Q3[1, 1] = 1000
Q3[2, 2] = 1000
Q3[3,3] = 1000
Q3[4,4] = 1000
Q3[5,5] = 1000
Q3[6,6] = 0.0
Q3[7,7] = 0.0
Q3[8,8] = 0.0
Q3[9, 9] = 0.0
Q3[10, 10] = 0.0
Q3[11, 11] = 0.0
######################
### Agent control law
if agent_control_policy == "LQR":
poltrack = [controls.LinearFeedbackConstTracker(A, B, Q, R, t) for t in targets]
### Agent Dynamics
ltidyn = dynamics.LTIDyn(A, B)
### Assignment Policy
if assignment_policy == 'AssignmentCustom':
apol = assignments.AssignmentCustom(nagents, ntargets)
if assignment_policy == 'AssignmentEMD':
apol = assignments.AssignmentEMD(nagents, ntargets)
### CONSTRUCT SIMULATION DICTIONARY
sim['agent_control_policy'] = agent_control_policy
sim['agent_model'] = agent_model
sim['agent_formation'] = agent_formation
sim['target_formation'] = target_formation
sim['collisions'] = collisions
sim['collision_tol'] = collision_tol
sim['dt'] = dt
sim['maxtime'] = maxtime
sim['dx'] = dx
sim['du'] = du
sim['statespace'] = statespace
sim['x0'] = x0
sim['agent_dyn'] = ltidyn
sim['agent_pol'] = poltrack
sim['asst_pol'] = apol
sim['asst_epoch'] = assignment_epoch
sim['nagents'] = nagents
sim['ntargets'] = ntargets
sim['runner'] = sim_runner
return sim
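# Illustrative sketch (field values are assumptions, based only on the keys
# setup_simulation reads above):
#   sim_profile = {
#       'agent_model': 'Double_Integrator',
#       'agent_control_policy': 'LQR',
#       'agent_formation': 'uniform_distribution',
#       'target_formation': 'fibonacci_sphere',
#       'assignment_policy': 'AssignmentEMD',
#       'assignment_epoch': 10,
#       'nagents': 5, 'ntargets': 5,
#       'collisions': False, 'collision_tol': 1e-1,
#       'dim': 3, 'dt': 0.01, 'maxtime': 5.0,
#       'initial_conditions': None,
#   }
#   sim = setup_simulation(sim_profile)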
def generate_distribution(dim, space, num_particles, distribution):
"""
Returns discrete distribution of states (ie. X,Y,Z positions)
Input:
- dim: dimension
- space: range of values that distribution can take
- num_particles: number of particles within the distribution
- distribution: name of distribution
Output:
- states: vector consisting of n-dimensional states corresponding to a desired distribution
"""
if distribution == 'uniform_distribution':
states = np.random.uniform(-space, space, (num_particles,dim))
elif distribution == 'circle':
radius = space
states = [distributions.circle(dim, radius, num_particles, t) for t in range(num_particles)] # circle
elif distribution == 'fibonacci_sphere':
radius = space
states = [distributions.fibonacci_sphere(radius, num_particles, t) for t in range(num_particles)] # sphere
return states
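# Illustrative usage sketch: 20 positions drawn uniformly in a cube of
# half-width 100, or placed on a Fibonacci sphere of radius 100.
#   pts_uniform = generate_distribution(3, 100, 20, 'uniform_distribution')
#   pts_sphere = generate_distribution(3, 100, 20, 'fibonacci_sphere')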
# TODO breakdown into more functions
def generate_initial_conditions(dim, initial_formation_params):
""" Returns initial states for agents, targets, and target terminal locations
"""
x0 = None
cities = None
nagents = initial_formation_params['nagents']
agent_model = initial_formation_params['agent_model']
agent_swarm_formation = initial_formation_params['agent_swarm_formation']
ntargets = initial_formation_params['ntargets']
target_swarm_formation = initial_formation_params['target_swarm_formation']
r = 100
# agent position distribution (ie. x, y, z state components)
x0p = generate_distribution(dim, r, nagents, agent_swarm_formation)
# target position distribution
x02p = generate_distribution(dim, r, ntargets, target_swarm_formation)
# TODO Place these into separate function
# populate the rest of the agent/target state components given the dynamics models
if dim == 2:
###### DOUBLE_INTEGRATOR ######
if agent_model == "Double_Integrator":
A, B, C, D, dx, du, statespace = linear_models_2D.double_integrator_2D()
### Initial conditions
# populate agent state
x0 = np.zeros((nagents, dx))
        # NOTE user-defined how the initial state is constructed
vel_range = 500
for ii, tt in enumerate(x0):
x0[ii] = np.array([x0p[ii][0],
x0p[ii][1],
np.random.uniform(-vel_range, vel_range, 1)[0],
np.random.uniform(-vel_range, vel_range, 1)[0]])
x0 = x0.flatten()
# populate target state
rot_x02p = np.random.uniform(-2*np.pi, 2*np.pi, (ntargets,dim)) # position spread
vel_range = 50
rot_vel_range = 25
x02 = np.zeros((ntargets, dx))
for ii, tt in enumerate(x02):
x02[ii] = np.array([
x02p[ii][0],
x02p[ii][1],
0, 0])
targets = x02.flatten()
x0 = np.hstack((x0, targets))
###### LINEARIZED_QUADCOPTER ######
if agent_model == "Linearized_Quadcopter":
A, B, C, D, dx, du, statespace = linear_models_2D.quadcopter_2D()
# Agents
# populate agent state
rot_x0p = np.random.uniform(-2*np.pi, 2*np.pi, (nagents,dim)) # position spread
vel_range = 500
rot_vel_range = 25
x0 = | np.zeros((nagents, dx)) | numpy.zeros |
"""
1-D discrete Fourier transform
"""
import numpy as np
from scipy.fftpack import fft, ifft
import matplotlib.pyplot as plt
# create sample time-series data
N = 120 # number of data points
T=10 # total sampling length
del_t= T/N # sampling interval
del_w=2*np.pi/T # angular-frequency spacing of the discrete Fourier transform
#
# generate the discrete sample points
xs = np.arange(0,T-del_t,del_t)
w=np.arange(2*np.pi/T, 2*np.pi*N/T, del_w)
#
f1,f2=3,4
f= | np.sin(2*np.pi*f1*xs) | numpy.sin |
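# Illustrative continuation (an assumption, not part of the original snippet):
# the DFT of sin(2*pi*f1*t) sampled over a window of length ~T peaks near
# bin f1*T, i.e. around index 30 for f1 = 3 and T = 10.
#   F = fft(f)
#   peak_bin = np.argmax(np.abs(F[:len(F)//2]))  # expect roughly f1*T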
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common corruptions to images.
Define 15+4 common image corruptions: Gaussian noise, shot noise, impulse noise,
defocus blur, frosted glass blur, zoom blur, fog, brightness, contrast, elastic,
pixelate, jpeg compression, frost, snow, and motion blur.
4 extra corruptions: gaussian blur, saturate, spatter, and speckle noise.
"""
import io
import subprocess
import tempfile
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
# To be populated by download_manager
FROST_FILENAMES = []
def _imagemagick_bin():
return 'imagemagick' # pylint: disable=unreachable
# /////////////// Corruption Helpers ///////////////
def around_and_astype(x):
"""Round a numpy array, and convert to uint8.
Args:
x: numpy array.
Returns:
numpy array with dtype uint8.
"""
return np.around(x).astype(np.uint8)
def disk(radius, alias_blur=0.1, dtype=np.float32):
"""Generating a Gaussian blurring kernel with disk shape.
Generating a Gaussian blurring kernel with disk shape using cv2 API.
Args:
radius: integer, radius of blurring kernel.
alias_blur: float, standard deviation of Gaussian blurring.
dtype: data type of kernel
Returns:
cv2 object of the Gaussian blurring kernel.
"""
if radius <= 8:
length = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
length = np.arange(-radius, radius + 1)
ksize = (5, 5)
x_axis, y_axis = np.meshgrid(length, length)
aliased_disk = np.array((x_axis**2 + y_axis**2) <= radius**2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return tfds.core.lazy_imports.cv2.GaussianBlur(
aliased_disk, ksize=ksize, sigmaX=alias_blur)
def clipped_zoom(img, zoom_factor):
"""Zoom image with clipping.
Zoom the central part of the image and clip extra pixels.
Args:
img: numpy array, uncorrupted image.
zoom_factor: numpy array, a sequence of float numbers for zoom factor.
Returns:
numpy array, zoomed image after clipping.
"""
h = img.shape[0]
ch = int(np.ceil(h / float(zoom_factor)))
top_h = (h - ch) // 2
w = img.shape[1]
cw = int(np.ceil(w / float(zoom_factor)))
top_w = (w - cw) // 2
img = tfds.core.lazy_imports.scipy.ndimage.zoom(
img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
order=1)
# trim off any extra pixels
trim_top_h = (img.shape[0] - h) // 2
trim_top_w = (img.shape[1] - w) // 2
return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
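def _example_clipped_zoom():
  """Usage sketch added for illustration (not part of the library): zoom a
  random float image by 1.2x about its center; the spatial shape is kept."""
  img = np.random.rand(224, 224, 3).astype(np.float32)
  zoomed = clipped_zoom(img, 1.2)
  assert zoomed.shape == img.shape
  return zoomed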
def plasma_fractal(mapsize=512, wibbledecay=3):
"""Generate a heightmap using diamond-square algorithm.
Modification of the algorithm in
https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
Args:
mapsize: side length of the heightmap, must be a power of two.
wibbledecay: integer, decay factor.
Returns:
numpy 2d array, side length 'mapsize', of floats in [0, 1].
"""
if mapsize & (mapsize - 1) != 0:
raise ValueError('mapsize must be a power of two.')
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square, calculate middle value as mean of points + wibble."""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond, calculate middle value as meanof points + wibble."""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize,
0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
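def _example_plasma_fractal():
  """Usage sketch added for illustration (not part of the library): sample a
  256x256 fog heightmap (floats in [0, 1]) and rescale it to uint8."""
  heightmap = plasma_fractal(mapsize=256, wibbledecay=2.0)
  return np.uint8(heightmap * 255)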
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
"""Gaussian noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added Gaussian noise.
"""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
return around_and_astype(x_clip)
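def _example_gaussian_noise():
  """Usage sketch added for illustration (not part of the library): each
  corruption maps a uint8 RGB image plus a severity in 1..5 to a uint8
  image of the same shape."""
  rng = np.random.RandomState(0)
  img = rng.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
  noisy = gaussian_noise(img, severity=3)
  assert noisy.dtype == np.uint8 and noisy.shape == img.shape
  return noisy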
def shot_noise(x, severity=1):
"""Shot noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added shot noise.
"""
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
x_clip = np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
return around_and_astype(x_clip)
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def defocus_blur(x, severity=1):
"""Defocus blurring to images.
Apply defocus blurring to images using Gaussian kernel.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied defocus blur.
"""
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(tfds.core.lazy_imports.cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
x_clip = np.clip(channels, 0, 1) * 255
return around_and_astype(x_clip)
def glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2),
(1.5, 4, 2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip)
def zoom_blur(x, severity=1):
"""Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
"""
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def fog(x, severity=1):
"""Fog corruption to images.
Adding fog to images. Fog is generated by diamond-square algorithm.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added fog.
"""
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
mapsize = 512
shape = x.shape
max_length = max(shape[0], shape[1])
if max_length > mapsize:
mapsize = 2**int(np.ceil(np.log2(float(max_length))))
tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
tmp = tmp[:x.shape[0], :x.shape[1]]
tmp = tmp[..., np.newaxis]
x += c[0] * tmp
x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
return around_and_astype(x_clip)
def brightness(x, severity=1):
"""Change brightness of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed brightness.
"""
c = [.1, .2, .3, .4, .5][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip)
def contrast(x, severity=1):
"""Change contrast of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Changed contrast.
"""
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
x_clip = np.clip((x - means) * c + means, 0, 1) * 255
return around_and_astype(x_clip)
def elastic_transform(x, severity=1):
"""Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
"""
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02),
(244 * 0.07, 244 * 0.01, 244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy,
(-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip)
def pixelate(x, severity=1):
"""Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
"""
c = [0.6, 0.5, 0.4, 0.3, 0.25][severity - 1]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int(shape[1] * c), int(shape[0] * c)))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
def jpeg_compression(x, severity=1):
"""Conduct jpeg compression to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied jpeg compression.
"""
c = [25, 18, 15, 10, 7][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
output = io.BytesIO()
x.save(output, 'JPEG', quality=c)
output.seek(0)
x = tfds.core.lazy_imports.PIL_Image.open(output)
return np.asarray(x)
def frost(x, severity=1):
"""Apply frost to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frost.
"""
c = [(1, 0.4), (0.8, 0.6), (0.7, 0.7), (0.65, 0.7), (0.6, 0.75)][severity - 1]
filename = FROST_FILENAMES[np.random.randint(5)]
with tempfile.NamedTemporaryFile() as im_frost:
tf.io.gfile.copy(filename, im_frost.name, overwrite=True)
frost_img = tfds.core.lazy_imports.cv2.imread(im_frost.name)
# randomly crop and convert to rgb
x_start, y_start = np.random.randint(
0, frost_img.shape[0] - 224), np.random.randint(0,
frost_img.shape[1] - 224)
frost_img = frost_img[x_start:x_start + 224, y_start:y_start + 224][...,
[2, 1, 0]]
x = np.clip(c[0] * np.array(x) + c[1] * frost_img, 0, 255)
return around_and_astype(x)
def snow(x, severity=1):
"""Apply snow to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied snow.
"""
cv2 = tfds.core.lazy_imports.cv2
PIL_Image = tfds.core.lazy_imports.PIL_Image # pylint: disable=invalid-name
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8), (0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7), (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
(0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
snow_layer = np.random.normal(
size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PIL_Image.fromarray(
(np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
with tempfile.NamedTemporaryFile() as im_input:
with tempfile.NamedTemporaryFile() as im_output:
snow_layer.save(im_input.name, format='PNG')
convert_bin = _imagemagick_bin()
radius = c[4]
sigma = c[5]
angle = np.random.uniform(-135, -45)
subprocess.check_output([
convert_bin, '-motion-blur', '{}x{}+{}'.format(radius, sigma, angle),
im_input.name, im_output.name
])
with open(im_output.name, 'rb') as f:
output = f.read()
snow_layer = cv2.imdecode(
np.frombuffer(output, np.uint8), cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(
x,
cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(224, 224, 1) * 1.5 + 0.5)
x = np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
return around_and_astype(x)
def motion_blur(x, severity=1):
"""Apply motion blur to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied motion blur.
"""
c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
with tempfile.NamedTemporaryFile() as im_input:
with tempfile.NamedTemporaryFile() as im_output:
x.save(im_input.name, format='PNG')
convert_bin = _imagemagick_bin()
radius = c[0]
sigma = c[1]
angle = np.random.uniform(-45, 45)  # was uniform(-45, -45), which always returned -45
subprocess.check_output([
convert_bin, '-motion-blur', '{}x{}+{}'.format(radius, sigma, angle),
im_input.name, im_output.name
])
with open(im_output.name, 'rb') as f:
output = f.read()
x = tfds.core.lazy_imports.cv2.imdecode(
np.frombuffer(output, np.uint8),
tfds.core.lazy_imports.cv2.IMREAD_UNCHANGED)
if x.shape != (224, 224):
x = np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
x = np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
return around_and_astype(x)
# /////////////// Extra Corruptions ///////////////
def gaussian_blur(x, severity=1):
"""Apply gaussian blur to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied gaussian blur.
"""
c = [1, 2, 3, 4, 6][severity - 1]
x = tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c, multichannel=True)
x = np.clip(x, 0, 1) * 255
return around_and_astype(x)
def saturate(x, severity=1):
"""Increase saturation of images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied saturation.
"""
c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]
x = np.array(x) / 255.
x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)
x = np.clip(x, 0, 1) * 255
return around_and_astype(x)
def spatter(x, severity=1):
"""Apply spatter to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied spatter.
"""
cv2 = tfds.core.lazy_imports.cv2
skimage = tfds.core.lazy_imports.skimage
c = [(0.65, 0.3, 4, 0.69, 0.6, 0), (0.65, 0.3, 3, 0.68, 0.6, 0),
(0.65, 0.3, 2, 0.68, 0.5, 0), (0.65, 0.3, 1, 0.65, 1.5, 1),
(0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = skimage.filters.gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
# ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
# ker -= np.mean(ker)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
# water is pale turquoise
color = np.concatenate(
(175 / 255. * np.ones_like(m[..., :1]), 238 / 255. *
np.ones_like(m[..., :1]), 238 / 255. * np.ones_like(m[..., :1])),
axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Input, BatchNormalization, PReLU
from keras.callbacks import ModelCheckpoint, Callback, TensorBoard
from keras.optimizers import SGD, Adam
import numpy as np
import math
import os
import random
from os import listdir, makedirs
from os.path import isfile, join, exists
from PIL import Image
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from s3sync import S3SyncCallback
def model(scale=2):
    # FSRCNN-style hyperparameters: feature dim d, shrinking dim s,
    # mapping depth m, output channels c.
    d = 56
    s = 12
    m = 4
    c = 3
    # Note: the original mixed Keras 1 kwargs (nb_filter, border_mode, ...)
    # with the Keras 2 Conv2DTranspose signature; rewritten consistently
    # in the Keras 2 API.
    SRCNN = Sequential()
    # feature extraction
    SRCNN.add(Conv2D(filters=d, kernel_size=(5, 5), kernel_initializer='glorot_uniform', padding='same', use_bias=True, input_shape=(100, 100, 3)))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    # shrinking
    SRCNN.add(Conv2D(filters=s, kernel_size=(1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    # non-linear mapping
    for i in range(m):
        SRCNN.add(Conv2D(filters=s, kernel_size=(3, 3), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
        SRCNN.add(PReLU(shared_axes=[1, 2]))
    # expanding
    SRCNN.add(Conv2D(filters=d, kernel_size=(1, 1), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
    SRCNN.add(PReLU(shared_axes=[1, 2]))
    # deconvolution upscales by the given integer scale factor
    SRCNN.add(Conv2DTranspose(filters=c, kernel_size=(9, 9), strides=(scale, scale), kernel_initializer='glorot_uniform', padding='same', use_bias=True))
    adam = Adam(lr=0.0003)
    SRCNN.compile(optimizer=adam, loss='mean_squared_error', metrics=['mean_squared_error'])
    return SRCNN
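# A minimal usage sketch (added for illustration, not part of the original
# script): build the x2 super-resolution network and print a layer summary.
if __name__ == '__main__':
    srcnn = model(scale=2)
    srcnn.summary()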
class MyDataGenerator(object):
def flow_from_directory(self, input_dir, label_dir, batch_size=32):
images = []
labels = []
while True:
files = listdir(input_dir)
random.shuffle(files)
for f in files:
images.append(self.load_image(input_dir, f))
labels.append(self.load_image(label_dir, f))
if len(images) == batch_size:
x_inputs = np.asarray(images)
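                    # Assumed continuation (the original generator is
                    # truncated at this point): emit the batch, then
                    # reset the buffers for the next one.
                    y_outputs = np.asarray(labels)
                    yield x_inputs, y_outputs
                    images = []
                    labels = []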
"""Testing for K-means"""
import re
import sys
import numpy as np
from scipy import sparse as sp
from threadpoolctl import threadpool_limits
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils.fixes import _astype_copy_false
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _labels_inertia
from sklearn.cluster._kmeans import _mini_batch_step
from sklearn.cluster._k_means_fast import _relocate_empty_clusters_dense
from sklearn.cluster._k_means_fast import _relocate_empty_clusters_sparse
from sklearn.cluster._k_means_fast import _euclidean_dense_dense_wrapper
from sklearn.cluster._k_means_fast import _euclidean_sparse_dense_wrapper
from sklearn.cluster._k_means_fast import _inertia_dense
from sklearn.cluster._k_means_fast import _inertia_sparse
from sklearn.datasets import make_blobs
from io import StringIO
from sklearn.metrics.cluster import homogeneity_score
# non-centered, sparse centers used to build the test dataset
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("algo", ["full", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_results(array_constr, algo, dtype):
# Checks that KMeans works as intended on toy dataset by comparing with
# expected results computed by hand.
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)
sample_weight = [3, 1, 1, 3]
init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.375
expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype)
expected_n_iter = 2
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X, sample_weight=sample_weight)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("array_constr",
[np.array, sp.csr_matrix],
ids=['dense', 'sparse'])
@pytest.mark.parametrize("algo", ['full', 'elkan'])
def test_relocated_clusters(array_constr, algo):
# check that empty clusters are relocated as expected
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])
# second center too far from others points will be empty at first iter
init_centers = np.array([[0.5, 0.5], [3, 3]])
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.25
expected_centers = [[0.25, 0], [0.75, 1]]
expected_n_iter = 3
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X)
assert_array_equal(kmeans.labels_, expected_labels)
assert_almost_equal(kmeans.inertia_, expected_inertia)
assert_array_almost_equal(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("representation", ["dense", "sparse"])
def test_relocate_empty_clusters(representation):
# test for the _relocate_empty_clusters_(dense/sparse) helpers
# Synthetic dataset with 3 obvious clusters of different sizes
X = np.array(
[-10., -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)
if representation == "sparse":
X = sp.csr_matrix(X)
sample_weight = np.full(shape=10, fill_value=1.)
# centers all initialized to the first point of X
centers_old = np.array([-10., -10, -10]).reshape(-1, 1)
# With this initialization, all points will be assigned to the first center
# At this point a center in centers_new is the weighted sum of the points
# it contains if it's not empty, otherwise it is the same as before.
centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)
weight_in_clusters = np.array([10., 0, 0])
labels = np.zeros(10, dtype=np.int32)
if representation == "dense":
_relocate_empty_clusters_dense(X, sample_weight, centers_old,
centers_new, weight_in_clusters, labels)
else:
_relocate_empty_clusters_sparse(X.data, X.indices, X.indptr,
sample_weight, centers_old,
centers_new, weight_in_clusters,
labels)
# The relocation scheme will take the 2 points farthest from the center and
# assign them to the 2 empty clusters, i.e. points at 10 and at 9.5. The
# first center will be updated to contain the other 8 points.
assert_array_equal(weight_in_clusters, [8, 1, 1])
assert_allclose(centers_new, [[-36], [10], [9.5]])
@pytest.mark.parametrize("distribution", ["normal", "blobs"])
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0])
def test_kmeans_elkan_results(distribution, array_constr, tol):
# Check that results are identical between lloyd and elkan algorithms
rnd = np.random.RandomState(0)
if distribution == 'normal':
X = rnd.normal(size=(5000, 10))
else:
X, _ = make_blobs(random_state=rnd)
km_full = KMeans(algorithm='full', n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_full.fit(X)
km_elkan.fit(X)
assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
assert km_elkan.n_iter_ == km_full.n_iter_
assert km_elkan.inertia_ == pytest.approx(km_full.inertia_, rel=1e-6)
@pytest.mark.parametrize('algorithm', ['full', 'elkan'])
def test_kmeans_convergence(algorithm):
# Check that KMeans stops when convergence is reached when tol=0. (#16075)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(5000, 10))
max_iter = 300
km = KMeans(algorithm=algorithm, n_clusters=5, random_state=0,
n_init=1, tol=0, max_iter=max_iter).fit(X)
assert km.n_iter_ < max_iter
@pytest.mark.parametrize('distribution', ['normal', 'blobs'])
def test_elkan_results_sparse(distribution):
# check that results are identical between lloyd and elkan algorithms
# with sparse input
rnd = np.random.RandomState(0)
if distribution == 'normal':
X = sp.random(100, 100, density=0.1, format='csr', random_state=rnd)
X.data = rnd.randn(len(X.data))
else:
X, _ = make_blobs(n_samples=100, n_features=100, random_state=rnd)
X = sp.csr_matrix(X)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
km_full.fit(X)
km_elkan.fit(X)
assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_)
assert_allclose(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = np.full(n_samples, -1, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert (mindist >= 0.0).all()
assert (labels_gold != -1).all()
sample_weight = None
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, sample_weight, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, sample_weight, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
weight_sums = np.zeros(new_centers.shape[0], dtype=np.double)
weight_sums_csr = np.zeros(new_centers.shape[0], dtype=np.double)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
sample_weight_mb = np.ones(X_mb.shape[0], dtype=np.double)
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, sample_weight_mb, x_mb_squared_norms, new_centers, weight_sums,
buffer, 1, None, random_reassign=False)
assert old_inertia > 0.0
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, sample_weight_mb, x_mb_squared_norms, new_centers)
assert new_inertia > 0.0
assert new_inertia < old_inertia
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr,
weight_sums_csr, buffer_csr, 1, None, random_reassign=False)
assert old_inertia_csr > 0.0
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr)
assert new_inertia_csr > 0.0
assert new_inertia_csr < old_inertia_csr
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert centers.shape == (n_clusters, n_features)
labels = km.labels_
assert np.unique(labels).shape[0] == n_clusters
# check that the labels assignment are perfect (up to a permutation)
assert v_measure_score(true_labels, labels) == 1.0
assert km.inertia_ > 0.0
# check error on dataset being too small
assert_raise_message(ValueError, "n_samples=1 should be >= n_clusters=%d"
% km.n_clusters, km.fit, [[0., 1.]])
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@pytest.mark.parametrize('data', [X, X_csr], ids=['dense', 'sparse'])
@pytest.mark.parametrize('init', ['random', 'k-means++', centers.copy()])
def test_k_means_init(data, init):
km = KMeans(init=init, n_clusters=n_clusters, random_state=42, n_init=1)
km.fit(data)
_check_fitted_model(km)
@pytest.mark.parametrize("init", ["random", "k-means++", centers,
lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"])
def test_minibatch_kmeans_partial_fit_init(init):
# Check MiniBatchKMeans init with partial_fit
km = MiniBatchKMeans(init=init, n_clusters=n_clusters, random_state=0)
for i in range(100):
# "random" init requires many batches to recover the true labels.
km.partial_fit(X)
_check_fitted_model(km)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, random_state=42, n_clusters=2)
km.fit(X)
assert_array_almost_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
@pytest.mark.parametrize('algo', ['full', 'elkan'])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):
# check that fit.predict gives same result as fit_predict
# There's a very small chance of failure with elkan on an unstructured dataset
# because predict method uses fast euclidean distances computation which
# may cause small numerical instabilities.
# NB: This test is largely redundant with respect to test_predict and
# test_predict_equal_labels. This test has the added effect of
# testing idempotence of the fitting procedure, which appears to
# be where it fails on some MacOS setups.
if sys.platform == "darwin":
pytest.xfail(
"Known failures on MacOS, See "
"https://github.com/scikit-learn/scikit-learn/issues/12644")
rng = np.random.RandomState(seed)
X = make_blobs(n_samples=1000, n_features=10, centers=10,
random_state=rng)[0].astype(dtype, copy=False)
X = constructor(X)
kmeans = KMeans(algorithm=algo, n_clusters=10, random_state=seed,
tol=tol, max_iter=max_iter)
labels_1 = kmeans.fit(X).predict(X)
labels_2 = kmeans.fit_predict(X)
# Due to randomness in the order in which chunks of data are processed when
# using more than one thread, the absolute values of the labels can be
# different between the 2 strategies but they should correspond to the same
# clustering.
assert v_measure_score(labels_1, labels_2) == 1
def test_minibatch_kmeans_verbose():
# Check verbose mode of MiniBatchKMeans for better coverage.
km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
km.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("algorithm", ["full", "elkan"])
@pytest.mark.parametrize("tol", [1e-2, 0])
def test_kmeans_verbose(algorithm, tol, capsys):
# Check verbose mode of KMeans for better coverage.
X = np.random.RandomState(0).normal(size=(5000, 10))
KMeans(algorithm=algorithm, n_clusters=n_clusters, random_state=42,
init="random", n_init=1, tol=tol, verbose=1).fit(X)
captured = capsys.readouterr()
assert re.search(r"Initialization complete", captured.out)
assert re.search(r"Iteration [0-9]+, inertia", captured.out)
if tol == 0:
assert re.search(r"strict convergence", captured.out)
else:
assert re.search(r"center shift .* within tolerance", captured.out)
def test_minibatch_kmeans_warning_init_size():
# Check that a warning is raised when init_size is smaller than n_clusters
with pytest.warns(RuntimeWarning,
match=r"init_size.* should be larger than n_clusters"):
MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
@pytest.mark.parametrize('data', [X, X_csr], ids=['dense', 'sparse'])
@pytest.mark.parametrize('init', ["random", 'k-means++', centers.copy()])
def test_minibatch_k_means_init(data, init):
mb_k_means = MiniBatchKMeans(init=init, n_clusters=n_clusters,
random_state=42, n_init=10)
mb_k_means.fit(data)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert mb_k_means.cluster_centers_.any(axis=1).sum() > 10
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert mb_k_means.cluster_centers_.any(axis=1).sum() > 10
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert mb_k_means.cluster_centers_.any(axis=1).sum() > 10
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert score_before > mb_k_means.score(this_X)
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that this values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert v_measure_score(true_labels, labels) == 1.0
def test_minibatch_kmeans_default_init_size():
# Check the internal _init_size attribute of MiniBatchKMeans
# default init size should be 3 * batch_size
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)
assert km._init_size == 15
# if 3 * batch size < n_clusters, it should then be 3 * n_clusters
km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)
assert km._init_size == 30
# it should not be larger than n_samples
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1,
init_size=n_samples + 1).fit(X)
assert km._init_size == n_samples
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert mb_k_means.init_size == 666
assert mb_k_means.init_size_ == n_samples
_check_fitted_model(mb_k_means)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert len(np.unique(km.labels_)) == 3
centers = km.cluster_centers_
assert np.linalg.norm(centers[0] - centers[1]) >= 0.1
assert np.linalg.norm(centers[0] - centers[2]) >= 0.1
assert np.linalg.norm(centers[1] - centers[2]) >= 0.1
@pytest.mark.parametrize('algo', ['full', 'elkan'])
def test_score(algo):
# Check that fitting k-means with multiple inits gives better score
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm=algo)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm=algo)
s2 = km2.fit(X).score(X)
assert s2 > s1
@pytest.mark.parametrize('Estimator', [KMeans, MiniBatchKMeans])
@pytest.mark.parametrize('data', [X, X_csr], ids=['dense', 'sparse'])
@pytest.mark.parametrize('init', ['random', 'k-means++', centers.copy()])
def test_predict(Estimator, data, init):
k_means = Estimator(n_clusters=n_clusters, init=init,
n_init=10, random_state=0).fit(data)
# sanity check: re-predict labeling for training set samples
assert_array_equal(k_means.predict(data), k_means.labels_)
# sanity check: predict centroid labels
pred = k_means.predict(k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# re-predict labels for training set using fit_predict
pred = k_means.fit_predict(data)
assert_array_equal(pred, k_means.labels_)
@pytest.mark.parametrize('init', ['random', 'k-means++', centers.copy()])
def test_predict_minibatch_dense_sparse(init):
# check that models trained on sparse input also works for dense input at
# predict time
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init=init,
n_init=10, random_state=0).fit(X_csr)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(
X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert km.cluster_centers_.dtype == np.float64
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_almost_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert X_new[c, c] == 0
for c2 in range(n_clusters):
if c != c2:
assert X_new[c, c2] > 0
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_almost_equal(X1, X2)
@pytest.mark.parametrize('algo', ['full', 'elkan'])
def test_predict_equal_labels(algo):
km = KMeans(random_state=13, n_init=1, max_iter=1,
algorithm=algo)
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13).fit(X)
km2 = KMeans(algorithm='elkan', random_state=13).fit(X)
assert homogeneity_score(
km1.predict(X), km2.predict(X)
) == pytest.approx(1.0)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert inertia[i] >= inertia[i + 1], failure_msg
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
sample_weight=None,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert centers.shape == (n_clusters, n_features)
labels = labels
assert np.unique(labels).shape[0] == n_clusters
# check that the labels assignment are perfect (up to a permutation)
assert v_measure_score(true_labels, labels) == 1.0
assert inertia > 0.0
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
sample_weight=None, init=centers)
def test_x_squared_norms_init_centroids():
# Test that x_squared_norms can be None in _init_centroids
from sklearn.cluster._kmeans import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_almost_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_float_precision(Estimator, data):
# Check that the results are the same for single and double precision.
km = Estimator(n_init=1, random_state=0)
inertia = {}
Xt = {}
centers = {}
labels = {}
for dtype in [np.float64, np.float32]:
X = data.astype(dtype, **_astype_copy_false(data))
km.fit(X)
inertia[dtype] = km.inertia_
Xt[dtype] = km.transform(X)
centers[dtype] = km.cluster_centers_
labels[dtype] = km.labels_
# dtype of cluster centers has to be the dtype of the input data
assert km.cluster_centers_.dtype == dtype
# same with partial_fit
if Estimator is MiniBatchKMeans:
km.partial_fit(X[0:3])
assert km.cluster_centers_.dtype == dtype
# compare arrays with low precision since the difference between 32 and
# 64 bit comes from an accumulation of rounding errors.
assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5)
assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5)
assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5)
assert_array_equal(labels[np.float32], labels[np.float64])
def test_k_means_init_centers():
# This test checks that KMeans won't silently mutate the user-provided
# input array, even if the input data and init centers have the same dtype
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert np.may_share_memory(km.cluster_centers_,
init_centers) is False
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_k_means_init_fitted_centers(data):
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
new_centers = KMeans(n_clusters=3, init=centers,
n_init=1).fit(X).cluster_centers_
assert_array_almost_equal(centers, new_centers)
def test_less_centers_than_unique_points():
X = np.asarray([[0, 0],
[0, 1],
[1, 0],
[1, 0]]) # last point is duplicated
km = KMeans(n_clusters=4).fit(X)
# only three distinct points, so only three clusters
# can have points assigned to them
assert set(km.labels_) == set(range(3))
# k_means should warn that fewer labels than cluster
# centers have been used
msg = ("Number of distinct clusters (3) found smaller than "
"n_clusters (4). Possibly due to duplicate points in X.")
assert_warns_message(ConvergenceWarning, msg, k_means, X,
sample_weight=None, n_clusters=4)
def _sort_centers(centers):
return np.sort(centers, axis=0)
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 10:17:34 2020
Compare the features calculated by Kubios with those calculated by our own algorithm.
@author: skjerns
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import config as cfg
from sleep import SleepSet
import scipy
import pandas as pd
from scipy.ndimage import median_filter, convolve
from scipy.ndimage.filters import gaussian_filter1d
os.makedirs(os.path.join(cfg.documents, 'reports', 'feat_comparison'), exist_ok=True)
ss = SleepSet(cfg.folder_unisens)
p = ss[16]
p.reset()
matfile = dict(p.feats.get_data())
kubios = matfile['TimeVar']
def best_corr(kubios, feat1):
"""
We correlate all Kubios features with this feature.
This is a somewhat sound way to check whether our feature has
the best correlation with what Kubios actually calculates.
"""
df = pd.DataFrame(columns=['Name', 'corr', 'data'])
for feat2_name, feat2 in kubios.items():
if abs(len(feat2)-len(feat1))>10: continue
if np.isnan(feat2).all(): continue
min_len = min(len(feat1), len(feat2))
mean = np.nan_to_num(np.nanmean(feat2))
feat2 = np.nan_to_num(feat2[:min_len], nan=mean)
import numpy as np
import pytest
from continuum.scenarios import Rotations
from tests.test_classorder import InMemoryDatasetTest
from continuum.datasets import MNIST, CIFAR100
@pytest.fixture
def numpy_data():
nb_classes = 6
nb_data = 100
x_train = []
y_train = []
for i in range(nb_classes):
x_train.append(np.ones((nb_data, 4, 4, 3), dtype=np.uint8) * i)
y_train.append(np.ones(nb_data) * i)  # '* i' assumed, mirroring the per-class x_train pattern
"""
Potential field transformations, like upward continuation and derivatives.
.. note:: Most, if not all, functions here required gridded data.
**Transformations**
* :func:`~fatiando.gravmag.transform.upcontinue`: Upward continuation of
gridded potential field data on a level surface.
* :func:`~fatiando.gravmag.transform.reduce_to_pole`: Reduce the total field
magnetic anomaly to the pole.
* :func:`~fatiando.gravmag.transform.tga`: Calculate the amplitude of the
total gradient (also called the analytic signal)
* :func:`~fatiando.gravmag.transform.tilt`: Calculates the tilt angle
* :func:`~fatiando.gravmag.transform.power_density_spectra`: Calculates
  the Power Density Spectra of gridded potential field data.
* :func:`~fatiando.gravmag.transform.radial_average`: Calculates the
  radial average of a Power Density Spectra using concentric rings.
**Derivatives**
* :func:`~fatiando.gravmag.transform.derivx`: Calculate the n-th order
derivative of a potential field in the x-direction (North-South)
* :func:`~fatiando.gravmag.transform.derivy`: Calculate the n-th order
derivative of a potential field in the y-direction (East-West)
* :func:`~fatiando.gravmag.transform.derivz`: Calculate the n-th order
derivative of a potential field in the z-direction
----
"""
from __future__ import division, absolute_import
import warnings
import numpy
from .. import utils
def reduce_to_pole(x, y, data, shape, inc, dec, sinc, sdec):
r"""
Reduce total field magnetic anomaly data to the pole.
The reduction to the pole if a phase transformation that can be applied to
total field magnetic anomaly data. It "simulates" how the data would be if
**both** the Geomagnetic field and the magnetization of the source were
vertical (:math:`90^\circ` inclination) (Blakely, 1996).
This functions performs the reduction in the frequency domain (using the
FFT). The transform filter is (in the frequency domain):
.. math::
RTP(k_x, k_y) = \frac{|k|^2}{
a_1 k_x^2 + a_2 k_y^2 + a_3 k_x k_y +
i|k|(b_1 k_x + b_2 k_y)}
in which :math:`k_x` and :math:`k_y` are the wave-numbers in the x and y
directions and
.. math::
|k| = \sqrt{k_x^2 + k_y^2} \\
a_1 = m_z f_z - m_x f_x \\
a_2 = m_z f_z - m_y f_y \\
a_3 = -m_y f_x - m_x f_y \\
b_1 = m_x f_z + m_z f_x \\
b_2 = m_y f_z + m_z f_y
:math:`\mathbf{m} = (m_x, m_y, m_z)` is the unit-vector of the total
magnetization of the source and
:math:`\mathbf{f} = (f_x, f_y, f_z)` is the unit-vector of the Geomagnetic
field.
.. note:: Requires gridded data.
.. warning::
The magnetization direction of the anomaly source is crucial to the
reduction-to-the-pole.
**Wrong values of *sinc* and *sdec* will lead to a wrong reduction.**
Parameters:
* x, y : 1d-arrays
The x, y, z coordinates of each data point.
* data : 1d-array
The total field anomaly data at each point.
* shape : tuple = (nx, ny)
The shape of the data grid
* inc, dec : floats
The inclination and declination of the inducing Geomagnetic field
* sinc, sdec : floats
The inclination and declination of the total magnetization of the
anomaly source. The total magnetization is the vector sum of the
induced and remanent magnetization. If there is only induced
magnetization, use the *inc* and *dec* of the Geomagnetic field.
Returns:
* rtp : 1d-array
The data reduced to the pole.
References:
Blakely, <NAME>. (1996), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
fx, fy, fz = utils.ang2vec(1, inc, dec)
if sinc is None or sdec is None:
mx, my, mz = fx, fy, fz
else:
mx, my, mz = utils.ang2vec(1, sinc, sdec)
    kx, ky = _fftfreqs(x, y, shape, shape)
kz_sqr = kx**2 + ky**2
a1 = mz*fz - mx*fx
a2 = mz*fz - my*fy
a3 = -my*fx - mx*fy
b1 = mx*fz + mz*fx
b2 = my*fz + mz*fy
# The division gives a RuntimeWarning because of the zero frequency term.
# This suppresses the warning.
with numpy.errstate(divide='ignore', invalid='ignore'):
rtp = (kz_sqr)/(a1*kx**2 + a2*ky**2 + a3*kx*ky +
1j*numpy.sqrt(kz_sqr)*(b1*kx + b2*ky))
rtp[0, 0] = 0
ft_pole = rtp*numpy.fft.fft2(numpy.reshape(data, shape))
return numpy.real(numpy.fft.ifft2(ft_pole)).ravel()
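# Hedged usage sketch for reduce_to_pole. The grid helper and the field and
# magnetization angles below are illustrative assumptions, not values taken
# from this module:
#
#     from fatiando import gridder
#     shape = (100, 100)
#     x, y = gridder.regular((-5000, 5000, -5000, 5000), shape)
#     # tfa: 1d-array with the total field anomaly sampled on the grid
#     rtp = reduce_to_pole(x, y, tfa, shape, inc=-60, dec=20, sinc=-60, sdec=20)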
def upcontinue(x, y, data, shape, height):
r"""
Upward continuation of potential field data.
Calculates the continuation through the Fast Fourier Transform in the
wavenumber domain (Blakely, 1996):
.. math::
F\{h_{up}\} = F\{h\} e^{-\Delta z |k|}
and then transformed back to the space domain. :math:`h_{up}` is the upward
continue data, :math:`\Delta z` is the height increase, :math:`F` denotes
the Fourier Transform, and :math:`|k|` is the wavenumber modulus.
.. note:: Requires gridded data.
.. note:: x, y, z and height should be in meters.
.. note::
It is not possible to get the FFT of a masked grid. The default
:func:`fatiando.gridder.interp` call using minimum curvature will not
be suitable. Use ``extrapolate=True`` or ``algorithm='nearest'`` to
get an unmasked grid.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
* height : float
The height increase (delta z) in meters.
Returns:
* cont : array
The upward continued data
References:
<NAME>. (1996), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
assert x.shape == y.shape, \
"x and y arrays must have same shape"
if height <= 0:
warnings.warn("Using 'height' <= 0 means downward continuation, " +
"which is known to be unstable.")
nx, ny = shape
# Pad the array with the edge values to avoid instability
padded, padx, pady = _pad_data(data, shape)
kx, ky = _fftfreqs(x, y, shape, padded.shape)
kz = numpy.sqrt(kx**2 + ky**2)
upcont_ft = numpy.fft.fft2(padded)*numpy.exp(-height*kz)
cont = numpy.real(numpy.fft.ifft2(upcont_ft))
# Remove padding
cont = cont[padx: padx + nx, pady: pady + ny].ravel()
return cont
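# Hedged usage sketch for upcontinue (same illustrative grid assumptions as
# above; data is the field measured at the original height):
#
#     up = upcontinue(x, y, data, shape, height=1000)  # continue 1 km upward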
def _upcontinue_space(x, y, data, shape, height):
"""
Upward continuation using the space-domain formula.
DEPRECATED. Use the better implementation using FFT. Kept here for
historical reasons.
"""
nx, ny = shape
dx = (x.max() - x.min())/(nx - 1)
dy = (y.max() - y.min())/(ny - 1)
area = dx*dy
deltaz_sqr = (height)**2
cont = numpy.zeros_like(data)
for i, j, g in zip(x, y, data):
cont += g*area*((x - i)**2 + (y - j)**2 + deltaz_sqr)**(-1.5)
cont *= abs(height)/(2*numpy.pi)
return cont
def tga(x, y, data, shape, method='fd'):
r"""
Calculate the total gradient amplitude (TGA).
This is the same as the `3D analytic signal` of Roest et al. (1992), but we
prefer the newer, more descriptive nomenclature suggested by Reid (2012).
The TGA is defined as the amplitude of the gradient vector of a potential
field :math:`T` (e.g. the magnetic total field anomaly):
.. math::
TGA = \sqrt{
\left(\frac{\partial T}{\partial x}\right)^2 +
\left(\frac{\partial T}{\partial y}\right)^2 +
\left(\frac{\partial T}{\partial z}\right)^2 }
.. note:: Requires gridded data.
.. warning::
If the data is not in SI units, the derivatives will be in
strange units and so will the total gradient amplitude! I strongly
recommend converting the data to SI **before** calculating the
TGA if you need the gradient in Eotvos (use one of the unit conversion
functions of :mod:`fatiando.utils`).
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
* method : string
The method used to calculate the horizontal derivatives. Options are:
``'fd'`` for finite-difference (more stable) or ``'fft'`` for the Fast
Fourier Transform. The z derivative is always calculated by FFT.
Returns:
* tga : 1D-array
The amplitude of the total gradient
References:
<NAME>. (2012), Forgotten truths, myths and sacred cows of Potential
Fields Geophysics - II, in SEG Technical Program Expanded Abstracts 2012,
pp. 1-3, Society of Exploration Geophysicists.
<NAME>., <NAME>, and <NAME> (1992), Magnetic interpretation
using the 3-D analytic signal, GEOPHYSICS, 57(1), 116-125,
doi:10.1190/1.1443174.
"""
dx = derivx(x, y, data, shape, method=method)
dy = derivy(x, y, data, shape, method=method)
dz = derivz(x, y, data, shape)
res = numpy.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
return res
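# Hedged usage sketch for tga (grid assumptions as in the earlier sketches);
# the amplitude tends to peak over the edges of the source bodies:
#
#     amp = tga(x, y, data, shape)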
def tilt(x, y, data, shape, xderiv=None, yderiv=None, zderiv=None):
r"""
Calculates the potential field tilt, as defined by Miller and Singh (1994)
.. math::
    tilt(f) = \tan^{-1}\left(
        \frac{\frac{\partial T}{\partial z}}
             {\sqrt{\left(\frac{\partial T}{\partial x}\right)^2 +
                    \left(\frac{\partial T}{\partial y}\right)^2}}
        \right)
When used on magnetic total field anomaly data, works best if the data is
reduced to the pole.
It's useful to plot the zero contour line of the tilt to represent possible
outlines of the source bodies. Use matplotlib's ``pyplot.contour`` or
``pyplot.tricontour`` for this.
.. note::
Requires gridded data if ``xderiv``, ``yderiv`` and ``zderiv`` are not
given.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid. Ignored if *xderiv*, *yderiv* and *zderiv* are
given.
* xderiv : 1D-array or None
    Optional. Values of the derivative in the x direction.
    If ``None``, will be calculated using the default options of
    :func:`~fatiando.gravmag.transform.derivx`
* yderiv : 1D-array or None
    Optional. Values of the derivative in the y direction.
    If ``None``, will be calculated using the default options of
    :func:`~fatiando.gravmag.transform.derivy`
* zderiv : 1D-array or None
    Optional. Values of the derivative in the z direction.
    If ``None``, will be calculated using the default options of
    :func:`~fatiando.gravmag.transform.derivz`
Returns:
* tilt : 1D-array
The tilt angle of the total field in radians.
References:
Miller, <NAME>, and <NAME>. 1994. "Potential Field Tilt --- a New
Concept for Location of Potential Field Sources."
Journal of Applied Geophysics 32 (2--3): 213-17.
doi:10.1016/0926-9851(94)90022-1.
"""
if xderiv is None:
xderiv = derivx(x, y, data, shape)
if yderiv is None:
yderiv = derivy(x, y, data, shape)
if zderiv is None:
zderiv = derivz(x, y, data, shape)
horiz_deriv = numpy.sqrt(xderiv**2 + yderiv**2)
tilt = numpy.arctan2(zderiv, horiz_deriv)
return tilt
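# Hedged usage sketch for tilt; plotting the zero contour is a common way to
# outline possible source bodies (the matplotlib calls are assumptions):
#
#     import matplotlib.pyplot as plt
#     t = tilt(x, y, data, shape)
#     plt.tricontour(y, x, t, levels=[0], colors='k')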
def derivx(x, y, data, shape, order=1, method='fd'):
"""
Calculate the derivative of a potential field in the x direction.
.. note:: Requires gridded data.
.. warning::
If the data is not in SI units, the derivative will be in
strange units! I strongly recommend converting the data to SI
**before** calculating the derivative (use one of the unit conversion
functions of :mod:`fatiando.utils`). This way the derivative will be in
SI units and can be easily converted to what unit you want.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
* order : int
The order of the derivative
* method : string
The method used to calculate the derivatives. Options are:
``'fd'`` for central finite-differences (more stable) or ``'fft'``
for the Fast Fourier Transform.
Returns:
* deriv : 1D-array
The derivative
"""
nx, ny = shape
assert method in ['fft', 'fd'], \
'Invalid method "{}".'.format(method)
if method == 'fft':
# Pad the array with the edge values to avoid instability
padded, padx, pady = _pad_data(data, shape)
kx, _ = _fftfreqs(x, y, shape, padded.shape)
deriv_ft = numpy.fft.fft2(padded)*(kx*1j)**order
deriv_pad = numpy.real(numpy.fft.ifft2(deriv_ft))
# Remove padding from derivative
deriv = deriv_pad[padx: padx + nx, pady: pady + ny]
elif method == 'fd':
datamat = data.reshape(shape)
dx = (x.max() - x.min())/(nx - 1)
deriv = numpy.empty_like(datamat)
deriv[1:-1, :] = (datamat[2:, :] - datamat[:-2, :])/(2*dx)
deriv[0, :] = deriv[1, :]
deriv[-1, :] = deriv[-2, :]
if order > 1:
deriv = derivx(x, y, deriv, shape, order=order - 1, method='fd')
return deriv.ravel()
def derivy(x, y, data, shape, order=1, method='fd'):
"""
Calculate the derivative of a potential field in the y direction.
.. note:: Requires gridded data.
.. warning::
If the data is not in SI units, the derivative will be in
strange units! I strongly recommend converting the data to SI
**before** calculating the derivative (use one of the unit conversion
functions of :mod:`fatiando.utils`). This way the derivative will be in
SI units and can be easily converted to what unit you want.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
* order : int
The order of the derivative
* method : string
The method used to calculate the derivatives. Options are:
``'fd'`` for central finite-differences (more stable) or ``'fft'``
for the Fast Fourier Transform.
Returns:
* deriv : 1D-array
The derivative
"""
nx, ny = shape
assert method in ['fft', 'fd'], \
'Invalid method "{}".'.format(method)
if method == 'fft':
# Pad the array with the edge values to avoid instability
padded, padx, pady = _pad_data(data, shape)
_, ky = _fftfreqs(x, y, shape, padded.shape)
deriv_ft = numpy.fft.fft2(padded)*(ky*1j)**order
deriv_pad = numpy.real(numpy.fft.ifft2(deriv_ft))
# Remove padding from derivative
deriv = deriv_pad[padx: padx + nx, pady: pady + ny]
elif method == 'fd':
datamat = data.reshape(shape)
dy = (y.max() - y.min())/(ny - 1)
deriv = numpy.empty_like(datamat)
deriv[:, 1:-1] = (datamat[:, 2:] - datamat[:, :-2])/(2*dy)
deriv[:, 0] = deriv[:, 1]
deriv[:, -1] = deriv[:, -2]
if order > 1:
deriv = derivy(x, y, deriv, shape, order=order - 1, method='fd')
return deriv.ravel()
def derivz(x, y, data, shape, order=1, method='fft'):
"""
Calculate the derivative of a potential field in the z direction.
.. note:: Requires gridded data.
.. warning::
If the data is not in SI units, the derivative will be in
strange units! I strongly recommend converting the data to SI
**before** calculating the derivative (use one of the unit conversion
functions of :mod:`fatiando.utils`). This way the derivative will be in
SI units and can be easily converted to what unit you want.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
* order : int
The order of the derivative
* method : string
The method used to calculate the derivatives. Options are:
``'fft'`` for the Fast Fourier Transform.
Returns:
* deriv : 1D-array
The derivative
"""
assert method == 'fft', \
"Invalid method '{}'".format(method)
nx, ny = shape
# Pad the array with the edge values to avoid instability
padded, padx, pady = _pad_data(data, shape)
kx, ky = _fftfreqs(x, y, shape, padded.shape)
deriv_ft = numpy.fft.fft2(padded)*numpy.sqrt(kx**2 + ky**2)**order
deriv = numpy.real(numpy.fft.ifft2(deriv_ft))
# Remove padding from derivative
return deriv[padx: padx + nx, pady: pady + ny].ravel()
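# Hedged usage sketch for the derivative functions (grid assumptions as above;
# all outputs are 1D-arrays on the same grid):
#
#     dx = derivx(x, y, data, shape)            # finite differences (default)
#     dy = derivy(x, y, data, shape)
#     dz = derivz(x, y, data, shape)            # always FFT-based
#     dxx = derivx(x, y, data, shape, order=2)  # second x-derivative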
def power_density_spectra(x, y, data, shape):
r"""
Calculates the Power Density Spectra of a 2D gridded potential field
through the FFT:
.. math::
    \Phi_{\Delta T}(k_x, k_y) = \left| F\left\{\Delta T\right\}(k_x, k_y) \right|^2
.. note:: Requires gridded data.
.. note:: x and y should be in meters.
Parameters:
* x, y : 1D-arrays
The x and y coordinates of the grid points
* data : 1D-array
The potential field at the grid points
* shape : tuple = (nx, ny)
The shape of the grid
Returns:
* kx, ky : 2D-arrays
The wavenumbers of each Power Density Spectra point
* pds : 2D-array
The Power Density Spectra of the data
"""
kx, ky = _fftfreqs(x, y, shape, shape)
pds = abs(numpy.fft.fft2(numpy.reshape(data, shape)))**2
return kx, ky, pds
def radial_average_spectrum(kx, ky, pds, max_radius=None, ring_width=None):
r"""
Calculates the average of the Power Density Spectra points that fall
inside concentric rings built around the origin of the wavenumber
coordinate system with constant width.
The width of the rings and the inner radius of the biggest ring can be
changed by setting the optional parameters ring_width and max_radius,
respectively.
.. note:: To calculate the radially averaged power density spectra
use the outputs of the function power_density_spectra as
input of this one.
Parameters:
* kx, ky : 2D-arrays
    The wavenumbers of each Power Density Spectra point
* pds : 2D-array
    The Power Density Spectra (as returned by
    :func:`~fatiando.gravmag.transform.power_density_spectra`)
* max_radius : float (optional)
Inner radius of the biggest ring.
By default it's set as the minimum of kx.max() and ky.max().
Making it smaller leaves points outside of the averaging,
and making it bigger includes points nearer to the boundaries.
* ring_width : float (optional)
Width of the rings.
    By default it's set as the largest value of :math:`\Delta k_x` and
    :math:`\Delta k_y`, which are the grid spacings of the kx and ky
    arrays.
    Making it bigger gives more populated averages, and
    making it smaller lowers the amount of points per ring
    (use it carefully).
Returns:
* k_radial : 1D-array
Wavenumbers of each Radially Averaged Power Spectrum point.
Also, the inner radius of the rings.
* pds_radial : 1D array
Radially Averaged Power Spectrum
"""
nx, ny = pds.shape
if max_radius is None:
max_radius = min(kx.max(), ky.max())
if ring_width is None:
ring_width = max(kx[1, 0], ky[0, 1])
k = numpy.sqrt(kx**2 + ky**2)
pds_radial = []
k_radial = []
radius_i = -1
while True:
radius_i += 1
if radius_i*ring_width > max_radius:
break
else:
if radius_i == 0:
inside = k <= 0.5*ring_width
else:
inside = numpy.logical_and(k > (radius_i - 0.5)*ring_width,
k <= (radius_i + 0.5)*ring_width)
pds_radial.append(pds[inside].mean())
k_radial.append(radius_i*ring_width)
return numpy.array(k_radial), numpy.array(pds_radial)
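# Hedged usage sketch chaining the two spectrum functions (grid assumptions as
# above):
#
#     kx, ky, pds = power_density_spectra(x, y, data, shape)
#     k_radial, pds_radial = radial_average_spectrum(kx, ky, pds)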
def _pad_data(data, shape):
n = _nextpow2(numpy.max(shape))
nx, ny = shape
padx = (n - nx)//2
pady = (n - ny)//2
padded = numpy.pad(data.reshape(shape), ((padx, padx), (pady, pady)),
mode='edge')
return padded, padx, pady
def _nextpow2(i):
buf = numpy.ceil(numpy.log(i)/numpy.log(2))
return int(2**buf)
def _fftfreqs(x, y, shape, padshape):
"""
Get two 2D-arrays with the wave numbers in the x and y directions.
"""
nx, ny = shape
dx = (x.max() - x.min())/(nx - 1)
fx = 2*numpy.pi*numpy.fft.fftfreq(padshape[0], dx)
dy = (y.max() - y.min())/(ny - 1)
fy = 2*numpy.pi*numpy.fft.fftfreq(padshape[1], dy)
    return numpy.meshgrid(fy, fx)
"""
April 2018
Simulates the trajectory implementing a CZ gate.
June 2018
Included noise in the simulation.
July 2018
Added distortions to simulation.
September 2018
Added flux noise as a quasi-static component with Gaussian distribution
"""
import time
import numpy as np
import qutip as qtp
from pycqed.measurement import detector_functions as det
from scipy.interpolate import interp1d
from pycqed.measurement.waveform_control_CC import waveforms_flux as wfl
import scipy
import matplotlib.pyplot as plt
import logging
#np.set_printoptions(threshold=np.inf)
# operators
b = qtp.tensor(qtp.destroy(3), qtp.qeye(3)) # LSB is static qubit
a = qtp.tensor(qtp.qeye(3), qtp.destroy(3))
n_q0 = a.dag() * a
n_q1 = b.dag() * b
H_coupling = (a.dag() + a) * (b + b.dag())
H_c = n_q0
scalefactor=1 # scalefactor not used anymore
# Hamiltonian
def coupled_transmons_hamiltonian(w_q0, w_q1, alpha_q0, alpha_q1, J, w_bus):
"""
Hamiltonian of two coupled anharmonic transmons.
Because the intention is to tune one qubit into resonance with the other,
the number of levels is limited.
q1 -> static qubit, 3-levels
q0 -> fluxing qubit, 3-levels
intended avoided crossing:
11 <-> 02 (q1 is the first (left) qubit and q0 the second (right) one)
N.B. the frequency of q0 is expected to be larger than that of q1
w_q0 > w_q1
and the anharmonicities alpha negative
"""
raise NotImplementedError("Old way of handling the hamiltonian H_0. Use calc_hamiltonian")
eps=0
delta_q1=w_q1-w_bus
delta_q0_interactionpoint=(w_q1-alpha_q0)-w_bus
delta_q0=(w_q0+eps)-w_bus
J_new = J / ((delta_q1+delta_q0_interactionpoint)/(delta_q1*delta_q0_interactionpoint)) * (delta_q1+delta_q0)/(delta_q1*delta_q0)
H_0 = w_q0 * n_q0 + w_q1 * n_q1 + \
1/2*alpha_q0*(a.dag()*a.dag()*a*a) + 1/2*alpha_q1*(b.dag()*b.dag()*b*b) +\
J_new * (a.dag() + a) * (b + b.dag())
return H_0
def hamiltonian_timedependent(H_0,eps,w_bus):
raise NotImplementedError("Old way of handling the hamiltonian time-dependent. Use calc_hamiltonian")
w_q0=np.real(H_0[1,1])
w_q1=np.real(H_0[3,3])
alpha_q0=np.real(H_0[2,2])-2*w_q0
J=np.real(H_0[1,3])
delta_q1=w_q1-w_bus
delta_q0_sweetspot=(w_q0)-w_bus
delta_q0=(w_q0+eps)-w_bus
J_new = J / ((delta_q1+delta_q0_sweetspot)/(delta_q1*delta_q0_sweetspot)) * (delta_q1+delta_q0)/(delta_q1*delta_q0)
return H_0+eps*H_c+(J_new-J)*H_coupling
# target in the case with no noise
# note that the Hilbert space is H_q1 /otimes H_q0
# so the ordering of basis states below is 00,01,02,10,11,12,20,21,22
U_target = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]],
type='oper',
dims=[[3, 3], [3, 3]])
#U_target._type = 'oper'
U_target_diffdims = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]],
type='oper',
dims=[[9], [9]]) # otherwise average_gate_fidelity doesn't work
# if there is noise the target is the corresponding superoperator
U_super_target = qtp.to_super(U_target)
'''
remember that qutip uses the Liouville (matrix) representation for superoperators,
with column stacking.
This means that
rho_{xy,x'y'}=rho[3*x+y,3*x'+y']
rho_{xy,x'y'}=operator_to_vector(rho)[3*x+y+27*x'+9*y']
where xy is the row and x'y' is the column
'''
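# Hedged sanity-check sketch for the index conventions above (purely
# illustrative; rand_dm draws a random density matrix):
#
#     rho = qtp.rand_dm(9, dims=[[3, 3], [3, 3]])
#     vec = qtp.operator_to_vector(rho)
#     x, y, xp, yp = 1, 2, 0, 1
#     assert abs(rho[3*x + y, 3*xp + yp] - vec[3*x + y + 27*xp + 9*yp, 0]) < 1e-12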
def plot(x_plot_vec,y_plot_vec,title='No title',xlabel='No xlabel',ylabel='No ylabel',legend_labels=list(),yscale='linear'):
# tool for plotting
# x_plot_vec and y_plot_vec should be passed as either lists or np.array
if isinstance(y_plot_vec,list):
y_length=len(y_plot_vec)
else:
y_length=np.size(y_plot_vec)
if legend_labels==[]:
legend_labels=np.arange(y_length)
for i in range(y_length):
if isinstance(y_plot_vec[i],list):
y_plot_vec[i]=np.array(y_plot_vec[i])
if isinstance(legend_labels[i],int):
legend_labels[i]=str(legend_labels[i])
if len(x_plot_vec)==1:
if isinstance(x_plot_vec[0],list):
x_plot_vec[0]=np.array(x_plot_vec[0])
plt.plot(x_plot_vec[0], y_plot_vec[i], label=legend_labels[i])
else:
if isinstance(x_plot_vec[i],list):
x_plot_vec[i]=np.array(x_plot_vec[i])
plt.plot(x_plot_vec[i], y_plot_vec[i], label=legend_labels[i])
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.yscale(yscale)
plt.show()
def jump_operators(T1_q0,T1_q1):
c_ops=[]
if T1_q0 != 0:
c_ops.append(np.sqrt(1/T1_q0)*a)
if T1_q1 != 0:
c_ops.append(np.sqrt(1/T1_q1)*b)
return c_ops
def c_ops_amplitudedependent(T1_q0,T1_q1,Tphi01_q0_vec,Tphi01_q1):
    # case where the pure dephasing for qubit q0 is time dependent, or rather pulse-amplitude dependent
c_ops=[]
if T1_q0 != 0:
c_ops.append(np.sqrt(1/T1_q0)*a)
if T1_q1 != 0:
c_ops.append(np.sqrt(1/T1_q1)*b)
if Tphi01_q1 != 0: # we automatically put also the decoherence for 12 and 02
sigmaZinqutrit = qtp.Qobj([[1,0,0],
[0,-1,0],
[0,0,0]])
collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
c_ops.append(collapse*np.sqrt(1/(2*Tphi01_q1)))
Tphi12_q1=Tphi01_q1
sigmaZinqutrit = qtp.Qobj([[0,0,0],
[0,1,0],
[0,0,-1]])
collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
c_ops.append(collapse*np.sqrt(1/(2*Tphi12_q1)))
Tphi02_q1=Tphi01_q1/2
sigmaZinqutrit = qtp.Qobj([[1,0,0],
[0,0,0],
[0,0,-1]])
collapse=qtp.tensor(sigmaZinqutrit,qtp.qeye(3))
c_ops.append(collapse*np.sqrt(1/(2*Tphi02_q1)))
if Tphi01_q0_vec != []: # we automatically put also the decoherence for 12 and 02
sigmaZinqutrit = qtp.Qobj([[1,0,0],
[0,-1,0],
[0,0,0]])
collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
c_ops.append([collapse,np.sqrt(1/(2*Tphi01_q0_vec))])
Tphi12_q0_vec=Tphi01_q0_vec
sigmaZinqutrit = qtp.Qobj([[0,0,0],
[0,1,0],
[0,0,-1]])
collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
c_ops.append([collapse,np.sqrt(1/(2*Tphi12_q0_vec))])
Tphi02_q0_vec=Tphi01_q0_vec/2
sigmaZinqutrit = qtp.Qobj([[1,0,0],
[0,0,0],
[0,0,-1]])
collapse=qtp.tensor(qtp.qeye(3),sigmaZinqutrit)
c_ops.append([collapse,np.sqrt(1/(2*Tphi02_q0_vec))])
return c_ops
def rotating_frame_transformation_propagator(U, t: float,
w_q0: float=0, w_q1: float =0):
"""
Transforms the frame of the unitary according to
U' = U_{RF}*U
NOTE: remember that this is how the time evolution operator changes from one picture to another
with
U_{RF} = e^{-i w_q0 a^dag a t } otimes e^{-i w_q1 b^dag b t }
(method for the case where we are simply rotating away the two qubit frequencies)
Args:
U (QObj): Unitary to be transformed
t (float): time at which to transform
w_q0 (float): freq of frame for q0
w_q1 (float): freq of frame for q1
"""
logging.warning('Recommended to use rotating_frame_transformation_new passing the hamiltonian as an argument.')
U_RF = (1j*w_q0*n_q0*t).expm() * (1j*w_q1*n_q1*t).expm()
if U.type=='super':
U_RF=qtp.to_super(U_RF)
U_prime = U_RF * U
""" U_RF only on one side because that's the operator that
satisfies the Schroedinger equation in the interaction picture.
"""
return U_prime
def rotating_frame_transformation_propagator_new(U, t: float, H):
"""
Transforms the frame of the unitary according to
U' = U_{RF}*U
NOTE: remember that this is how the time evolution operator changes from one picture to another
Args:
U (QObj): Unitary to be transformed
t (float): time at which to transform
H (QObj): hamiltonian to be rotated away
"""
U_RF = (1j*H*t).expm()
if U.type=='super':
U_RF=qtp.to_super(U_RF)
U_prime = U_RF * U
""" U_RF only on one side because that's the operator that
satisfies the Schroedinger equation in the interaction picture.
"""
return U_prime
def rotating_frame_transformation_operators(operator, t: float, H):
"""
Transforms the frame of an operator (hamiltonian, or jump operator) according to
O' = U_{RF}*O*U_{RF}^dag
Args:
operator (QObj): operator to be transformed
t (float): time at which to transform
H (QObj): hamiltonian to be rotated away
"""
U_RF = (1j*H*t).expm()
    return U_RF * operator * U_RF.dag()
def correct_reference(U,w_q1,w_q0,t):
# w_qi should be a frequency (not including the 2*pi factor). Moreover they and t should be in the same scale.
# this functions should be used just to make sanity checks.
phase_to_correct_q1 = w_q1*(2*np.pi)*t
phase_to_correct_q0 = w_q0*(2*np.pi)*t
Ucorrection = qtp.Qobj([[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.exp(1j*phase_to_correct_q0), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, np.exp(1j*phase_to_correct_q1), 0, 0, 0, 0, 0],
[0, 0, 0, 0, np.exp(1j*(phase_to_correct_q0+phase_to_correct_q1)), 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.exp(1j*phase_to_correct_q1), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, np.exp(1j*phase_to_correct_q0), 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]],
type='oper',
dims=[[3, 3], [3, 3]])
if U.type=='oper':
return Ucorrection*U
elif U.type=='super':
return qtp.to_super(Ucorrection)*U
def phases_from_superoperator(U):
"""
Returns the phases from the unitary or superoperator U
"""
if U.type=='oper':
phi_00 = np.rad2deg(np.angle(U[0, 0])) # expected to equal 0 because of our
# choice for the energy, not because of rotating frame. But not guaranteed including the coupling
phi_01 = np.rad2deg(np.angle(U[1, 1]))
phi_10 = np.rad2deg(np.angle(U[3, 3]))
phi_11 = np.rad2deg(np.angle(U[4, 4]))
phi_02 = np.rad2deg(np.angle(U[2, 2])) # used only for avgatefid_superoperator_phasecorrected
phi_20 = np.rad2deg(np.angle(U[6, 6])) # used only for avgatefid_superoperator_phasecorrected
elif U.type=='super':
phi_00 = 0 # we set it to 0 arbitrarily but it is indeed not knowable
phi_01 = np.rad2deg(np.angle(U[1, 1])) # actually phi_01-phi_00 etc
phi_10 = np.rad2deg(np.angle(U[3, 3]))
phi_11 = np.rad2deg(np.angle(U[4, 4]))
phi_02 = np.rad2deg(np.angle(U[2, 2]))
phi_20 = np.rad2deg(np.angle(U[6, 6]))
phi_cond = (phi_11 - phi_01 - phi_10 + phi_00) % 360 # still the right formula independently from phi_00
return phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond
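# Hedged example: applied to the ideal CZ target defined above, the function
# returns a conditional phase of 180 degrees:
#
#     phases = phases_from_superoperator(U_target)
#     # phases[-1] (phi_cond) == 180.0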
def pro_avfid_superoperator_compsubspace(U,L1):
"""
Average process (gate) fidelity in the qubit computational subspace for two qutrits.
Leakage has to be taken into account, see Woods & Gambetta.
The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by
the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>.
If this is not the case, one needs to change the basis to that one before calling this function.
"""
if U.type=='oper':
inner = U.dag()*U_target
part_idx = [0, 1, 3, 4] # only computational subspace
ptrace = 0
for i in part_idx:
ptrace += inner[i, i]
dim = 4 # 2 qubits comp subspace
return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))
elif U.type=='super':
kraus_form = qtp.to_kraus(U)
dim=4 # 2 qubits in the computational subspace
part_idx = [0, 1, 3, 4] # only computational subspace
psum=0
for A_k in kraus_form:
ptrace = 0
inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch
for i in part_idx:
ptrace += inner[i, i]
psum += (np.abs(ptrace))**2
return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))
def pro_avfid_superoperator_compsubspace_phasecorrected(U,L1,phases):
"""
Average process (gate) fidelity in the qubit computational subspace for two qutrits
Leakage has to be taken into account, see Woods & Gambetta
The phase is corrected with Z rotations considering both transmons as qubits. The correction is done perfectly.
The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by
the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>.
If this is not the case, one needs to change the basis to that one before calling this function.
"""
Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.exp(-1j*np.deg2rad(phases[1])), 0, 0, 0, 0, 0, 0, 0],
[0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],
[0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0, 0, 0],
[0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[3]-phases[-1])), 0, 0, 0, 0],
[0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[2])), 0, 0, 0],
[0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0],
[0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[1])), 0],
[0, 0, 0, 0, 0, 0, 0, 0, np.exp(-1j*np.deg2rad(phases[0]))]],
type='oper',
dims=[[3, 3], [3, 3]])
if U.type=='oper':
U=Ucorrection*U
inner = U.dag()*U_target
part_idx = [0, 1, 3, 4] # only computational subspace
ptrace = 0
for i in part_idx:
ptrace += inner[i, i]
dim = 4 # 2 qubits comp subspace
return np.real(((np.abs(ptrace))**2+dim*(1-L1))/(dim*(dim+1)))
elif U.type=='super':
U=qtp.to_super(Ucorrection)*U
kraus_form = qtp.to_kraus(U)
dim=4 # 2 qubits in the computational subspace
part_idx = [0, 1, 3, 4] # only computational subspace
psum=0
for A_k in kraus_form:
ptrace = 0
inner = U_target_diffdims.dag()*A_k # otherwise dimension mismatch
for i in part_idx:
ptrace += inner[i, i]
psum += (np.abs(ptrace))**2
return np.real((dim*(1-L1) + psum) / (dim*(dim + 1)))
def pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U,L1,phases):
"""
Average process (gate) fidelity in the qubit computational subspace for two qutrits
Leakage has to be taken into account, see Woods & Gambetta
The phase is corrected with Z rotations considering both transmons as qubits. The correction is done perfectly.
The function assumes that the computational subspace (:= the 4 energy levels chosen as the two qubits) is given by
the standard basis |0> /otimes |0>, |0> /otimes |1>, |1> /otimes |0>, |1> /otimes |1>.
If this is not the case, one needs to change the basis to that one before calling this function.
"""
Ucorrection = qtp.Qobj([[np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0, 0],
[0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, np.exp(-1j*np.deg2rad(phases[0])), 0, 0, 0, 0, 0, 0],
"""
isicarchive.jitfunc
This module provides JIT (numba) helper functions and doesn't have to
be imported from outside the main package functionality (isicapi).
Functions
---------
conv_kernel
Generate convolution smoothing kernel
image_mix
Mix two images (RGB and/or gray scale, alpha parameter supported)
image_resample_u1
Cheap (!) image resampling for uint8 images
image_resample_f4
Cheap (!) image resampling for float32 images
superpixel_contour
Extract superpixel contour
superpixel_decode
Converts an RGB superpixel image to a 2D superpixel index array
superpixel_map
Decodes a superpixel (index) array into a 2D mapping array
superpixel_outline_dir
Extract SVG path directions from binary mask of outline
superpixel_path
Extract superpixel path
svg_coord_list
Generate SVG-path-suitable list of directions from coordinates list
svg_path_from_list
Generate SVG-path-suitable list of directions from v/h list
"""
__version__ = '0.4.8'
from typing import Optional, Tuple
import numba
from numba import jit, prange
import numpy
# convolution (smoothing) kernel
@jit('f4[:](f4)', nopython=True)
def conv_kernel(fwhm:numpy.float32 = 2.0) -> numpy.ndarray:
"""
Generate convolution smoothing kernel
Parameters
----------
fwhm : numpy scalar float32
Gaussian kernel size in FWHM (full-width at half-maximum)
Returns
-------
kernel : ndarray
Gaussian smoothing kernel (numpy.float32)
"""
if fwhm <= 0.29:
return numpy.asarray([0,1,0]).astype(numpy.float32)
fwhm = fwhm / numpy.sqrt(8.0 * numpy.log(2.0))
if fwhm < 2.0:
md = numpy.trunc(0.5 + 6.0 * fwhm)
else:
md = numpy.trunc(0.5 + 6.0 * numpy.log2(fwhm) * fwhm)
k = numpy.exp(-((numpy.arange(-md,md+1.0,1.0) ** 2) / (2.0 * fwhm * fwhm)))
k = k[k >= 0.00000001]
return (k / numpy.sum(k)).astype(numpy.float32)
# image convolution (cheap!)
@jit('f4[:,:](f4[:,:],f4[:])', nopython=True)
def image_conv_float(
data:numpy.ndarray,
kernel:numpy.ndarray,
) -> numpy.ndarray:
"""
Two-dimensional image convolution with kernel vector (staggered)
Parameters
----------
data : ndarray
Image data (must be 2D numpy.float32!)
kernel : ndarray
Kernel vector (must be numpy.float32!)
Returns
-------
conv_data : ndarray
Convolved data array
"""
if (kernel.size) == 1:
kernel = conv_kernel(kernel[0])
if (kernel.size % 2) != 1:
raise ValueError('Parameter kernel must have odd length of elements.')
s = numpy.sum(kernel)
if s <= 0.0:
raise ValueError('Parameter kernel must have a positive sum.')
if s < 0.999999 or s > 1.000001:
kernel = kernel / s
ds0 = data.shape[0]
ds1 = data.shape[1]
kh = kernel.size // 2
temp = numpy.zeros(data.size, dtype=numpy.float32).reshape(data.shape)
tempv = numpy.zeros(ds0, dtype=numpy.float32)
for c in prange(ds0): #pylint: disable=not-an-iterable
col = temp[c,:]
colv = 0.0
for k in range(kernel.size):
dc = c + k - kh
if dc < 0 or dc >= ds0:
continue
colv += kernel[k]
col += kernel[k] * data[dc,:]
temp[c,:] = col
tempv[c] = colv
temp = numpy.true_divide(temp, tempv.reshape((ds0,1,)))
out = numpy.zeros(data.size, dtype=numpy.float32).reshape(data.shape)
tempv = numpy.zeros(ds1, dtype=numpy.float32)
for c in prange(ds1): #pylint: disable=not-an-iterable
col = out[:,c]
colv = 0.0
for k in range(kernel.size):
dc = c + k - kh
if dc < 0 or dc >= ds1:
continue
colv += kernel[k]
col += kernel[k] * temp[:,dc]
out[:,c] = col
tempv[c] = colv
return numpy.true_divide(out, tempv.reshape((1,ds1,)))
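# Hedged usage sketch for conv_kernel + image_conv_float (the image values are
# illustrative):
#
#     img = numpy.random.rand(64, 64).astype(numpy.float32)
#     smoothed = image_conv_float(img, conv_kernel(numpy.float32(2.0)))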
# image mixing
@jit('u1[:,:](u1[:,:],u1[:,:],optional(f4[:]))', nopython=True)
def image_mix(
i1:numpy.ndarray,
i2:numpy.ndarray,
a2:numpy.ndarray = None,
) -> numpy.ndarray:
"""
Mix two images with optional alpha channel
Parameters
----------
i1, i2 : ndarray
Image vectors array (second dimension is RGB color!)
a2 : ndarray
Optional alpha (opacity) array for second image
Returns
-------
mixed : ndarray
Mixed image vectors array
"""
ishape = i1.shape
i2shape = i2.shape
oi = numpy.zeros(i1.size, dtype=numpy.uint8).reshape(ishape)
num_pix = ishape[0]
if i2shape[0] != num_pix:
raise ValueError('Images mismatch in number of pixels')
if (not a2 is None) and (a2.size != num_pix):
raise ValueError('Alpha mismatch in number of pixels')
if ishape[1] == 1:
if i2shape[1] == 1:
if a2 is None:
for p in prange(num_pix): #pylint: disable=not-an-iterable
oi[p,0] = max(i1[p,0], i2[p,0])
else:
o = numpy.float32(1.0)
for p in prange(num_pix): #pylint: disable=not-an-iterable
a = a2[p]
ia = o - a
oi[p,0] = round(
ia * numpy.float32(i1[p,0]) +
a * numpy.float32(i2[p,0]))
elif i2shape[1] != 3:
raise ValueError('i2 not a valid image array')
else:
th = numpy.float32(1.0) / numpy.float32(3)
if a2 is None:
for p in prange(num_pix): #pylint: disable=not-an-iterable
i2m = round(th * (
numpy.float32(i2[p,0]) +
numpy.float32(i2[p,1]) +
numpy.float32(i2[p,2])))
oi[p,0] = max(i1[p,0], i2m)
else:
o = numpy.float32(1.0)
for p in prange(num_pix): #pylint: disable=not-an-iterable
a = a2[p]
ia = o - a
i2m = th * (
numpy.float32(i2[p,0]) +
numpy.float32(i2[p,1]) +
numpy.float32(i2[p,2]))
oi[p,0] = round(ia * numpy.float32(i1[p,0]) + a * i2m)
elif ishape[1] != 3:
raise ValueError('i1 not a valid image array')
else:
if i2shape[1] == 1:
if a2 is None:
for p in prange(num_pix): #pylint: disable=not-an-iterable
oi[p,0] = max(i1[p,0], i2[p,0])
oi[p,1] = max(i1[p,1], i2[p,0])
oi[p,2] = max(i1[p,2], i2[p,0])
else:
o = numpy.float32(1.0)
for p in prange(num_pix): #pylint: disable=not-an-iterable
a = a2[p]
ia = o - a
i2ap = a * numpy.float32(i2[p,0])
oi[p,0] = round(ia * numpy.float32(i1[p,0]) + i2ap)
oi[p,1] = round(ia * numpy.float32(i1[p,1]) + i2ap)
oi[p,2] = round(ia * numpy.float32(i1[p,2]) + i2ap)
elif i2shape[1] != 3:
raise ValueError('i2 not a valid image array')
else:
if a2 is None:
for p in prange(num_pix): #pylint: disable=not-an-iterable
oi[p,0] = max(i1[p,0], i2[p,0])
oi[p,1] = max(i1[p,1], i2[p,1])
oi[p,2] = max(i1[p,2], i2[p,2])
else:
o = numpy.float32(1.0)
for p in prange(num_pix): #pylint: disable=not-an-iterable
a = a2[p]
ia = o - a
oi[p,0] = round(
ia * numpy.float32(i1[p,0]) +
a * numpy.float32(i2[p,0]))
oi[p,1] = round(
ia * numpy.float32(i1[p,1]) +
a * numpy.float32(i2[p,1]))
oi[p,2] = round(
ia * numpy.float32(i1[p,2]) +
a * numpy.float32(i2[p,2]))
return oi
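# Hedged usage sketch for image_mix; images are passed as pixel vectors with
# the color channel in the second dimension (values illustrative):
#
#     rgb1 = numpy.zeros((4096, 3), dtype=numpy.uint8)
#     rgb2 = numpy.full((4096, 3), 255, dtype=numpy.uint8)
#     alpha = numpy.full(4096, 0.5, dtype=numpy.float32)
#     mixed = image_mix(rgb1, rgb2, alpha)  # uniform mid-gray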
# image resampling (cheap!)
@jit('u1[:,:,:](u1[:,:,:],i4,i4)', nopython=True)
def image_resample_u1(image:numpy.ndarray, d0:numpy.int, d1:numpy.int) -> numpy.ndarray:
"""
Cheap (!) image resampling for uint8 images
Parameters
----------
image : ndarray
Image array
d0, d1 : int
Target image size in first and second dimension
Returns
-------
res : ndarray
Resampled image array
"""
im_shape = image.shape
f0 = numpy.float(im_shape[0]) / numpy.float(d0)
f1 = numpy.float(im_shape[1]) / numpy.float(d1)
temp = numpy.zeros(im_shape[0] * d1 * im_shape[2], dtype=numpy.uint8).reshape(
(numpy.int64(im_shape[0]),numpy.int64(d1),numpy.int64(im_shape[2]),))
for c in prange(d1): #pylint: disable=not-an-iterable
ffrom = f1 * numpy.float(c) + 0.5
fto = ffrom + f1 - 1.0
        ifrom = numpy.int64(numpy.trunc(ffrom))
import utm as UTM
import unittest
import numpy as np
class UTMTestCase(unittest.TestCase):
def assert_utm_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0]))
self.assertTrue(np.allclose(a[1], b[1]))
self.assertEqual(a[2], b[2])
self.assertEqual(a[3].upper(), b[3].upper())
def assert_latlon_equal(self, a, b):
self.assertTrue(np.allclose(a[0], b[0], rtol=1e-4, atol=1e-4))
self.assertTrue(np.allclose(a[1], b[1], rtol=1e-4, atol=1e-4))
class KnownValues(UTMTestCase):
known_values = [
# Aachen, Germany
(
(50.77535, 6.08389),
(294409, 5628898, 32, 'U'),
{'northern': True},
),
# New York, USA
(
(40.71435, -74.00597),
(583960, 4507523, 18, 'T'),
{'northern': True},
),
# Wellington, New Zealand
(
(-41.28646, 174.77624),
(313784, 5427057, 60, 'G'),
{'northern': False},
),
# Capetown, South Africa
(
(-33.92487, 18.42406),
(261878, 6243186, 34, 'H'),
{'northern': False},
),
# Mendoza, Argentina
(
(-32.89018, -68.84405),
(514586, 6360877, 19, 'h'),
{'northern': False},
),
# Fairbanks, Alaska, USA
(
(64.83778, -147.71639),
(466013, 7190568, 6, 'W'),
{'northern': True},
),
# <NAME>, Scotland, UK
(
(56.79680, -5.00601),
(377486, 6296562, 30, 'V'),
{'northern': True},
),
# Latitude 84
(
(84, -5.00601),
(476594, 9328501, 30, 'X'),
{'northern': True},
),
]
def test_from_latlon(self):
lats = np.array([0.0, 3.0, 6.0])
lons = np.array([0.0, 1.0, 3.4])
result = UTM.from_latlon(lats, lons)
self.assert_utm_equal((np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N'), result)
for latlon, utm, _ in self.known_values:
result = UTM.from_latlon(*[np.array([x]) for x in latlon])
self.assert_utm_equal(utm, result)
def test_to_latlon(self):
result = UTM.to_latlon(np.array([166021.44317933032,
277707.83075574087,
544268.12794623]),
np.array([0.0,
331796.29167519242,
663220.7198366751]),
31, 'N')
self.assert_latlon_equal((np.array([0.0, 3.0, 6.0]),
np.array([0.0, 1.0, 3.4])),
result)
for latlon, utm, utm_kw in self.known_values:
utm = [np.array([x]) for x in utm[:2]] + list(utm[2:])
result = UTM.to_latlon(*utm)
self.assert_latlon_equal(latlon, result)
class BadInput(UTMTestCase):
def test_from_latlon_range_checks(self):
'''from_latlon should fail with out-of-bounds input'''
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-80.1), np.array(0))
for i in range(-8000, 8400):
UTM.from_latlon(np.array(i / 100.0), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(84.1), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(-180.1))
for i in range(-18000, 18000):
UTM.from_latlon(np.array(0), np.array(i / 100.0))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(180.1))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(0), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(-300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(-100), np.array(300))
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(100), np.array(300))
# test forcing zone ranges
# NYC should be zone 18T
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 70, 'T')
self.assertRaises(UTM.OutOfRangeError, UTM.from_latlon,
np.array(40.71435), np.array(-74.00597), 18, 'A')
def test_to_latlon_range_checks(self):
'''to_latlon should fail with out-of-bounds input'''
# test easting range
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(0), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(99999), np.array(5000000), 32, 'U')
for i in range(100000, 999999, 1000):
UTM.to_latlon(np.array(i), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(1000000), np.array(5000000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(100000000000), np.array(5000000), 32, 'U')
# test northing range
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(-100000), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(-1), 32, 'U')
for i in range(10, 10000000, 1000):
UTM.to_latlon(np.array(500000), np.array(i), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(10000001), 32, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(50000000), 32, 'U')
# test zone numbers
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 0, 'U')
for i in range(1, 60):
UTM.to_latlon(np.array(500000), np.array(5000000), i, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 61, 'U')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 1000, 'U')
# test zone letters
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'A')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'B')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'I')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'O')
for i in range(ord('C'), ord('X')):
i = chr(i)
if i != 'I' and i != 'O':
UTM.to_latlon(np.array(500000), np.array(5000000), 32, i)
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'Y')
self.assertRaises(
UTM.OutOfRangeError, UTM.to_latlon, np.array(500000), np.array(5000000), 32, 'Z')
class Zone32V(unittest.TestCase):
def assert_zone_equal(self, result, expected_number, expected_letter):
self.assertEqual(result[2], expected_number)
self.assertEqual(result[3].upper(), expected_letter.upper())
def test_inside(self):
self.assert_zone_equal(UTM.from_latlon(
np.array(56), np.array(3)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(56), np.array(6)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(56), np.array(9)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(56), np.array(11.999999)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(60), np.array(3)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(60), np.array(6)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(60), np.array(9)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(60), np.array(11.999999)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(63.999999), np.array(3)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(63.999999), np.array(6)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
np.array(63.999999), np.array(9)), 32, 'V')
self.assert_zone_equal(UTM.from_latlon(
            np.array(63.999999), np.array(11.999999)), 32, 'V')
import numpy as np
import mc.util
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
class Likelihood:
def __init__(self, true_tx_power=0.8, true_min_power=1e-5, true_path_loss_exp=3, n=10) -> None:
self.true_tx_power = true_tx_power
self.true_min_power = true_min_power
self.true_path_loss_exp = true_path_loss_exp
self.tx_power_array = np.linspace(true_tx_power/2., true_tx_power*2., num=n)
self.min_power_array = np.linspace(true_min_power / 2., true_min_power * 2., num=n)
self.path_loss_exp_array = np.linspace(true_path_loss_exp / 2., true_path_loss_exp * 2., num=n)
self.i_max = None
self.i_closest = None
self.likelihood = None
# non-blocking plots
plt.ion()
self.tx_power_grid_1st, self.min_power_grid_2nd = np.meshgrid(self.tx_power_array, self.min_power_array)
_, self.path_loss_exp_grid_2nd = np.meshgrid(self.tx_power_array, self.path_loss_exp_array)
self.min_power_grid_1st, _ = np.meshgrid(self.min_power_array, self.path_loss_exp_array)
self.plots_z_value = 10000
# to be run from, at least, mc.pmc.PopulationMonteCarlo.step
def explore(self, pf, observations):
log_tx_power_array, log_min_power_array, log_path_loss_exp_array =\
np.log(self.tx_power_array), np.log(self.min_power_array), np.log(self.path_loss_exp_array)
self.likelihood = np.empty((len(self.tx_power_array), len(self.min_power_array), len(self.path_loss_exp_array)))
for i_log_tx_power, log_tx_power in enumerate(log_tx_power_array):
print('log_tx_power = {}'.format(log_tx_power))
for i_log_min_power, log_min_power in enumerate(log_min_power_array):
for i_log_path_loss_exp, log_path_loss_exp in enumerate(log_path_loss_exp_array):
self.likelihood[i_log_tx_power, i_log_min_power, i_log_path_loss_exp] =\
mc.util.loglikelihood(pf, observations, log_tx_power, log_min_power, log_path_loss_exp)
print(self.likelihood)
# this is a tuple
self.i_max = np.unravel_index(self.likelihood.argmax(), self.likelihood.shape)
        print('maximum likelihood = {} at transmitter power = {}, min power = {}, path loss exponent = {} ({})'.format(
self.likelihood[self.i_max], self.tx_power_array[self.i_max[0]], self.min_power_array[self.i_max[1]],
self.path_loss_exp_array[self.i_max[2]], self.i_max))
likelihood_at_true = mc.util.loglikelihood(
pf, observations, np.log(self.true_tx_power), np.log(self.true_min_power), np.log(self.true_path_loss_exp))
print('likelihood at true parameters: {}'.format(likelihood_at_true))
self.i_closest = (
np.abs(self.tx_power_array - self.true_tx_power).argmin(),
np.abs(self.min_power_array - self.true_min_power).argmin(),
np.abs(self.path_loss_exp_array - self.true_path_loss_exp).argmin())
        print('closest point to ground truth is transmitter power = {}, min power = {}, path loss exponent = {}, likelihood {} ({})'.format(
self.tx_power_array[self.i_closest[0]], self.min_power_array[self.i_closest[1]],
self.path_loss_exp_array[self.i_closest[2]], self.likelihood[self.i_closest], self.i_closest))
def plot3d(self, x, y, Z):
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = | np.meshgrid(x, y) | numpy.meshgrid |
# -*- coding: utf-8 -*-
import sys
import os
import toml
import librosa
import librosa.display
import matplotlib.pyplot as plt
import soundfile as sf
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
import paddle
from paddle.io import DataLoader
from paddle.signal import stft, istft
from visualdl import LogWriter
sys.path.append("./")
from FullBandNet.model import FullBandNet
from dataset.dataset import DNS_Dataset
from audio.feature import is_clipped
from audio.mask import decompress_cIRM
from audio.metrics import STOI, WB_PESQ
from audio.utils import prepare_empty_path
plt.switch_backend("agg")
class Inferencer:
def __init__(self, model, test_iter, config):
# get checkpoints path
self.checkpoints_path = os.path.join(os.path.dirname(__file__), "checkpoints")
# get output path
self.output_path = os.path.join(os.path.dirname(__file__), "enhanced")
# get logs path
self.logs_path = os.path.join(os.path.dirname(__file__), "logs", "inference")
prepare_empty_path([self.output_path, self.logs_path])
# set iter
self.test_iter = test_iter
# get model
self.model = model
self.load_checkpoint()
# get dataset args
self.sr = config["dataset"]["sr"]
self.n_fft = config["dataset"]["n_fft"]
self.win_len = config["dataset"]["win_len"]
self.hop_len = config["dataset"]["hop_len"]
self.window = paddle.to_tensor(np.hanning(self.win_len), dtype=paddle.float32)
# get inference args
self.audio_visual_samples = config["inference"]["audio_visual_samples"]
# config logs
self.writer = LogWriter(logdir=self.logs_path, max_queue=5, flush_secs=60)
self.writer_text_enh_clipped_step = 1
self.writer.add_text(
tag="config",
text_string=f"<pre \n{toml.dumps(config)} \n</pre>",
step=1,
)
def load_checkpoint(self):
best_model_path = os.path.join(self.checkpoints_path, "best_model.tar")
assert os.path.exists(best_model_path)
checkpoint = paddle.load(best_model_path)
self.epoch = checkpoint["epoch"]
self.model.set_state_dict(checkpoint["model"])
print(f"Loading model checkpoint (epoch == {self.epoch})...")
def check_clipped(self, enh, enh_file):
if is_clipped(enh):
self.writer.add_text(
tag="enh_clipped",
text_string=enh_file,
step=self.writer_text_enh_clipped_step,
)
self.writer_text_enh_clipped_step += 1
def audio_visualization(self, noisy, clean, enh, name):
self.writer.add_audio("audio/noisy", noisy, 1, sample_rate=self.sr)
self.writer.add_audio("audio/clean", clean, 1, sample_rate=self.sr)
self.writer.add_audio("audio/enh", enh, 1, sample_rate=self.sr)
# Visualize the spectrogram of noisy speech, clean speech, and enhanced speech
noisy_mag, _ = librosa.magphase(librosa.stft(noisy, n_fft=320, hop_length=160, win_length=320))
clean_mag, _ = librosa.magphase(librosa.stft(clean, n_fft=320, hop_length=160, win_length=320))
enh_mag, _ = librosa.magphase(librosa.stft(enh, n_fft=320, hop_length=160, win_length=320))
fig, axes = plt.subplots(3, 1, figsize=(6, 6))
for k, mag in enumerate([noisy_mag, clean_mag, enh_mag]):
axes[k].set_title(
f"mean: {np.mean(mag):.3f}, "
f"std: {np.std(mag):.3f}, "
f"max: {np.max(mag):.3f}, "
f"min: {np.min(mag):.3f}"
)
librosa.display.specshow(librosa.amplitude_to_db(mag), cmap="magma", y_axis="linear", ax=axes[k], sr=16000)
plt.tight_layout()
self.writer.add_figure(f"spec/{name}", fig, 1)
def metrics_visualization(self, noisy_list, clean_list, enh_list, n_jobs=8):
noisy_stoi_score = Parallel(n_jobs=n_jobs)(
delayed(STOI)(noisy, clean) for noisy, clean in tqdm(zip(noisy_list, clean_list))
)
enh_stoi_score = Parallel(n_jobs=n_jobs)(
delayed(STOI)(noisy, clean) for noisy, clean in tqdm(zip(enh_list, clean_list))
)
noisy_stoi_score_mean = np.mean(noisy_stoi_score)
enh_stoi_score_mean = np.mean(enh_stoi_score)
self.writer.add_scalar("STOI/test/noisy", noisy_stoi_score_mean, 1)
self.writer.add_scalar("STOI/test/enh", enh_stoi_score_mean, 1)
noisy_wb_pesq_score = Parallel(n_jobs=n_jobs)(
delayed(WB_PESQ)(noisy, clean) for noisy, clean in tqdm(zip(noisy_list, clean_list))
)
enh_wb_pesq_score = Parallel(n_jobs=n_jobs)(
delayed(WB_PESQ)(noisy, clean) for noisy, clean in tqdm(zip(enh_list, clean_list))
)
        noisy_wb_pesq_score_mean = np.mean(noisy_wb_pesq_score)
        enh_wb_pesq_score_mean = np.mean(enh_wb_pesq_score)
        # mirrors the STOI logging above (the tag names are assumptions)
        self.writer.add_scalar("WB_PESQ/test/noisy", noisy_wb_pesq_score_mean, 1)
        self.writer.add_scalar("WB_PESQ/test/enh", enh_wb_pesq_score_mean, 1)
import numpy as np
import os
import mxnet as mx
import gluonnlp as nlp
def prepare_dataset(filename, allow_pickle=False):
return nlp.data.NumpyDataset(filename[0], allow_pickle=allow_pickle)
def prepare_bucket_sampler(dataset, batch_size, shuffle=False, num_buckets=1):
lengths = dataset.transform(lambda x: len(x))
sampler = nlp.data.FixedBucketSampler(lengths,
batch_size=batch_size,
num_buckets=num_buckets,
ratio=0,
shuffle=shuffle)
return sampler
def test_dataset_loader():
num_files = 5
for i in range(num_files):
np.save(os.path.join('tests', 'data', 'part_{}.npy'.format(i)),
                np.random.uniform(size=(100, 20)))