import numpy as np
import cv2
# https://stackoverflow.com/questions/22937589/how-to-add-noise-gaussian-salt-and-pepper-etc-to-image-in-python-with-opencv
class Noiser(object):
def __init__(self, cfg):
self.cfg = cfg
def apply(self, img):
"""
:param img: word image with big background
"""
p = []
funcs = []
if self.cfg.noise.gauss.enable:
p.append(self.cfg.noise.gauss.fraction)
funcs.append(self.apply_gauss_noise)
if self.cfg.noise.uniform.enable:
p.append(self.cfg.noise.uniform.fraction)
funcs.append(self.apply_uniform_noise)
if self.cfg.noise.salt_pepper.enable:
p.append(self.cfg.noise.salt_pepper.fraction)
funcs.append(self.apply_sp_noise)
if self.cfg.noise.poisson.enable:
p.append(self.cfg.noise.poisson.fraction)
funcs.append(self.apply_poisson_noise)
if len(p) == 0:
return img
# NOTE: np.random.choice requires the enabled fractions in p to sum to 1
noise_func = np.random.choice(funcs, p=p)
return noise_func(img)
def apply_gauss_noise(self, img):
"""
Gaussian-distributed additive noise.
"""
row, col, channel = img.shape
mean = 0
stddev = np.sqrt(15)
gauss_noise = np.zeros((row, col, channel))
cv2.randn(gauss_noise, mean, stddev)
out = img + gauss_noise
return out
def apply_uniform_noise(self, img):
"""
Apply zero-mean uniform noise
"""
row, col, channel = img.shape
alpha = 0.05
gauss = np.random.uniform(0 - alpha, alpha, (row, col, channel))
gauss = gauss.reshape(row, col, channel)
out = img + img * gauss
return out
def apply_sp_noise(self, img):
"""
Salt and pepper noise. Replaces random pixels with 0 or 255.
"""
row, col, channel = img.shape
s_vs_p = 0.5
amount = np.random.uniform(0.004, 0.01)
out = np.copy(img)
# Salt mode
num_salt = np.ceil(amount * img.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in img.shape]
out[tuple(coords)] = 255.
# Pepper mode
num_pepper = np.ceil(amount * img.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in img.shape]
out[tuple(coords)] = 0
return out
def apply_poisson_noise(self, img):
"""
Poisson-distributed noise generated from the data.
"""
vals = len(np.unique(img))
vals = 2 ** np.ceil(np.log2(vals))
if vals < 0:
return img
noisy = np.random.poisson(img * vals) / float(vals)
return noisy
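# Hypothetical usage sketch (not from the original source). The cfg layout is
# inferred from the attribute accesses in Noiser.apply above; SimpleNamespace
# stands in for whatever config object the project actually uses.
def _noiser_demo():
    from types import SimpleNamespace as NS
    cfg = NS(noise=NS(gauss=NS(enable=True, fraction=0.5),
                      uniform=NS(enable=True, fraction=0.5),
                      salt_pepper=NS(enable=False, fraction=0.0),
                      poisson=NS(enable=False, fraction=0.0)))
    # fractions of the enabled noise types must sum to 1
    img = np.random.randint(0, 255, (64, 64, 3)).astype(np.float64)
    return Noiser(cfg).apply(img)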
import numpy
from shadow4.syned.shape import Rectangle
from shadow4.syned.element_coordinates import ElementCoordinates
from syned.beamline.optical_elements.crystals.crystal import Crystal, DiffractionGeometry
from shadow4.physical_models.prerefl.prerefl import PreRefl
from shadow4.beamline.s4_beamline_element import S4BeamlineElement
from crystalpy.diffraction.DiffractionSetup import DiffractionSetup
from crystalpy.diffraction.DiffractionSetupDabax import DiffractionSetupDabax
from crystalpy.diffraction.DiffractionSetupShadowPreprocessorV1 import DiffractionSetupShadowPreprocessorV1
from crystalpy.diffraction.DiffractionSetupShadowPreprocessorV2 import DiffractionSetupShadowPreprocessorV2
from crystalpy.diffraction.GeometryType import BraggDiffraction
from crystalpy.diffraction.Diffraction import Diffraction
from crystalpy.util.Vector import Vector
from crystalpy.util.Photon import Photon
from crystalpy.util.ComplexAmplitudePhoton import ComplexAmplitidePhoton
from crystalpy.util.ComplexAmplitudePhotonBunch import ComplexAmplitudePhotonBunch
import scipy.constants as codata
class S4Crystal(Crystal):
def __init__(self,
name="Undefined",
boundary_shape=None,
surface_shape=None,
material=None,
diffraction_geometry=DiffractionGeometry.BRAGG, #?? not supposed to be in syned...
miller_index_h=1,
miller_index_k=1,
miller_index_l=1,
asymmetry_angle=0.0,
thickness=0.010, ###########################
f_central=False,
f_phot_cent=0,
phot_cent=8000.0,
file_refl="",
f_bragg_a=False,
# a_bragg=0.0,
f_johansson=False,
r_johansson=1.0,
f_mosaic=False,
spread_mos=0.4*numpy.pi/180,
f_ext=0,
material_constants_library_flag=0, # 0=xraylib, 1=dabax
# 2=shadow preprocessor file v1
# 3=shadow preprocessor file v2
):
"""
f_crystal = 1 - flag: crystal -- yes (1), no (0).
f_mosaic = 1 - if f_crystal=1; flag: mosaic crystal - yes (1), no (0).
f_central = 1 - flag: autotuning of grating or crystal - yes (1), no (0).
f_phot_cent = 0 - for f_central=1: tune to eV(0) or Angstroms (1).
phot_cent = 11160.0 - for f_phot_cent=1: photon energy (eV).
file_refl = 'GAAS.SHA' - for f_crystal=1: file containing the crystal parameters.
f_bragg_a = 0 - flag: is the crystal asymmetric - yes (1), no (0).
f_johansson = 0 - if f_crystal=1; flag: johansson geometry - yes (1), no (0).
a_bragg = 0.0 - f_bragg_a=1: angle between crystal planes and surface.
spread_mos = 0.4 - f_mosaic=1: mosaic spread FWHM (degrees).
thickness = 0.1 - crystal thickness in m.
f_ext = 0 - flag for internal/calculated (0) parameters vs. external/user defined parameters (1).
r_johansson = 0.0 - f_ext=1: johansson radius.
"""
Crystal.__init__(self,
name=name,
surface_shape=surface_shape,
boundary_shape=boundary_shape,
material=material,
diffraction_geometry=diffraction_geometry,
miller_index_h=miller_index_h,
miller_index_k=miller_index_k,
miller_index_l=miller_index_l,
asymmetry_angle=asymmetry_angle,
thickness=thickness,
)
self._f_mosaic = f_mosaic
self._f_central = f_central
self._f_phot_cent = f_phot_cent
self._phot_cent = phot_cent
self._file_refl = file_refl
self._f_bragg_a = f_bragg_a
self._f_johansson = f_johansson
self._spread_mos = spread_mos
self._f_ext = f_ext
self._r_johansson = r_johansson
self._material_constants_library_flag = material_constants_library_flag
self.congruence()
def congruence(self):
print(self._material)
if self._f_mosaic or \
self._f_bragg_a or \
self._f_johansson:
raise Exception("Not implemented")
class S4CrystalElement(S4BeamlineElement):
def __init__(self, optical_element=None, coordinates=None):
super().__init__(optical_element if optical_element is not None else S4Crystal(),
coordinates if coordinates is not None else ElementCoordinates())
self._crystalpy_diffraction_setup = None
self.align_crystal()
def align_crystal(self):
oe = self.get_optical_element()
coor = self.get_coordinates()
if oe._material_constants_library_flag == 0:
print("\nCreating a diffraction setup (XRAYLIB)...")
diffraction_setup = DiffractionSetup(geometry_type=BraggDiffraction(), # todo: use oe._diffraction_geometry
crystal_name=oe._material, # string
thickness=oe._thickness, # meters
miller_h=oe._miller_index_h, # int
miller_k=oe._miller_index_k, # int
miller_l=oe._miller_index_l, # int
asymmetry_angle=oe._asymmetry_angle, # radians
azimuthal_angle=0.0)
elif oe._material_constants_library_flag == 1:
print("\nCreating a diffraction setup (DABAX)...")
diffraction_setup = DiffractionSetupDabax(geometry_type=BraggDiffraction(), # todo: use oe._diffraction_geometry
crystal_name=oe._material, # string
thickness=oe._thickness, # meters
miller_h=oe._miller_index_h, # int
miller_k=oe._miller_index_k, # int
miller_l=oe._miller_index_l, # int
asymmetry_angle=oe._asymmetry_angle, # radians
azimuthal_angle=0.0)
elif oe._material_constants_library_flag == 2:
print("\nCreating a diffraction setup (shadow preprocessor file V1)...")
diffraction_setup = DiffractionSetupShadowPreprocessorV1(geometry_type=BraggDiffraction(), # todo: use oe._diffraction_geometry
crystal_name=oe._material, # string
thickness=oe._thickness, # meters
miller_h=oe._miller_index_h, # int
miller_k=oe._miller_index_k, # int
miller_l=oe._miller_index_l, # int
asymmetry_angle=oe._asymmetry_angle, # radians
azimuthal_angle=0.0,
preprocessor_file=oe._file_refl)
elif oe._material_constants_library_flag == 3:
print("\nCreating a diffraction setup (shadow preprocessor file V2)...")
diffraction_setup = DiffractionSetupShadowPreprocessorV2(geometry_type=BraggDiffraction(), # todo: use oe._diffraction_geometry
crystal_name=oe._material, # string
thickness=oe._thickness, # meters
miller_h=oe._miller_index_h, # int
miller_k=oe._miller_index_k, # int
miller_l=oe._miller_index_l, # int
asymmetry_angle=oe._asymmetry_angle, # radians
azimuthal_angle=0.0,
preprocessor_file=oe._file_refl)
else:
raise NotImplementedError
self._crystalpy_diffraction_setup = diffraction_setup
if oe._f_central:
if oe._f_phot_cent == 0:
energy = oe._phot_cent
else:
energy = codata.h * codata.c / codata.e * 1e2 / (oe._phot_cent * 1e-8) # wavelength in Angstrom -> energy in eV (E ~ 12398.4 / lambda[A])
raise NotImplementedError("autotuning with phot_cent given in Angstrom is not yet implemented")
setting_angle = diffraction_setup.angleBraggCorrected(energy)
print("Bragg angle for E=%f eV is %f deg" % (energy, setting_angle * 180.0 / numpy.pi))
coor.set_angles(angle_radial=numpy.pi/2-setting_angle,
angle_radial_out=numpy.pi/2-setting_angle,
angle_azimuthal=0.0)
else:
print("Info: nothing to align: f_central=0")
print(coor.info())
def trace_beam(self, beam_in, flag_lost_value=-1):
p = self.get_coordinates().p()
q = self.get_coordinates().q()
theta_grazing1 = numpy.pi / 2 - self.get_coordinates().angle_radial()
theta_grazing2 = numpy.pi / 2 - self.get_coordinates().angle_radial_out()
alpha1 = self.get_coordinates().angle_azimuthal()
#
beam = beam_in.duplicate()
#
# put beam in mirror reference system
#
beam.rotate(alpha1, axis=2)
beam.rotate(theta_grazing1, axis=1)
beam.translation([0.0, -p * numpy.cos(theta_grazing1), p * numpy.sin(theta_grazing1)])
#
# reflect beam in the mirror surface
#
soe = self.get_optical_element()
beam_in_crystal_frame_before_reflection = beam.duplicate()
if not isinstance(soe, Crystal): # undefined
raise Exception("Undefined Crystal")
else:
beam_mirr, normal = self.apply_crystal_diffraction(beam) # warning, beam is also changed!!
#
# apply mirror boundaries
#
beam_mirr.apply_boundaries_syned(soe.get_boundary_shape(), flag_lost_value=flag_lost_value)
########################################################################################
#
# TODO" apply crystal reflectivity
#
nrays = beam_mirr.get_number_of_rays()
energy = 8000.0 # eV
# Create a Diffraction object (the calculator)
diffraction = Diffraction()
scan_type = 1 # 0=scan, 1=loop on rays, 2=bunch of photons (not functional) # TODO: delete 0,2
if scan_type == 0: # scan
# setting_angle = self._crystalpy_diffraction_setup.angleBragg(energy)
setting_angle = self._crystalpy_diffraction_setup.angleBraggCorrected(energy)
angle_deviation_points = nrays
# initialize arrays for storing outputs
intensityS = numpy.zeros(nrays)
intensityP = numpy.zeros(nrays)
angle_deviation_min = -100e-6 # radians
angle_deviation_max = 100e-6 # radians
angle_step = (angle_deviation_max - angle_deviation_min) / angle_deviation_points
deviations = numpy.zeros(angle_deviation_points)
for ia in range(angle_deviation_points):
deviation = angle_deviation_min + ia * angle_step
angle = deviation + setting_angle
# calculate the components of the unitary vector of the incident photon scan
# Note that diffraction plane is YZ
yy = numpy.cos(angle)
zz = - numpy.abs(numpy.sin(angle))
photon = Photon(energy_in_ev=energy, direction_vector=Vector(0.0, yy, zz))
# if ia < 10: print(ia, 0.0, yy, zz)
# perform the calculation
coeffs = diffraction.calculateDiffractedComplexAmplitudes(self._crystalpy_diffraction_setup, photon)
# store results
deviations[ia] = deviation
intensityS[ia] = coeffs['S'].intensity()
intensityP[ia] = coeffs['P'].intensity()
elif scan_type == 1: # from beam, loop
# initialize arrays for storing outputs
complex_reflectivity_S = numpy.zeros(nrays, dtype=complex)
complex_reflectivity_P = numpy.zeros(nrays, dtype=complex)
# we retrieve data from "beam" meaning the beam before reflection, in the crystal frame (incident beam...)
xp = beam_in_crystal_frame_before_reflection.get_column(4)
yp = beam_in_crystal_frame_before_reflection.get_column(5)
zp = beam_in_crystal_frame_before_reflection.get_column(6)
energies = beam_in_crystal_frame_before_reflection.get_photon_energy_eV()
for ia in range(nrays):
photon = Photon(energy_in_ev=energies[ia], direction_vector=Vector(xp[ia], yp[ia], zp[ia]))
# if ia < 10: print(ia, xp[ia], yp[ia], zp[ia])
# perform the calculation
coeffs = diffraction.calculateDiffractedComplexAmplitudes(self._crystalpy_diffraction_setup, photon)
# store results
complex_reflectivity_S[ia] = coeffs['S'].complexAmplitude()
complex_reflectivity_P[ia] = coeffs['P'].complexAmplitude()
beam_mirr.apply_complex_reflectivities(complex_reflectivity_S, complex_reflectivity_P)
elif scan_type == 2: # from beam, bunch
# this is complicated... and not faster...
# todo: accelerate crystalpy create calculateDiffractedComplexAmplitudes for a PhotonBunch
# we retrieve data from "beam" meaning the beam before reflection, in the crystal frame (incident beam...)
xp = beam_in_crystal_frame_before_reflection.get_column(4)
yp = beam_in_crystal_frame_before_reflection.get_column(5)
zp = beam_in_crystal_frame_before_reflection.get_column(6)
energies = beam_in_crystal_frame_before_reflection.get_photon_energy_eV()
Esigma = numpy.sqrt(beam_in_crystal_frame_before_reflection.get_column(24)) * \
numpy.exp(1j * beam_in_crystal_frame_before_reflection.get_column(14))
Epi = numpy.sqrt(beam_in_crystal_frame_before_reflection.get_column(25)) * \
numpy.exp(1j * beam_in_crystal_frame_before_reflection.get_column(15))
photons = ComplexAmplitudePhotonBunch()
for ia in range(nrays):
photons.addPhoton(
ComplexAmplitidePhoton(energy_in_ev=energies[ia],
direction_vector=Vector(xp[ia], yp[ia], zp[ia]),
Esigma= 1.0, # Esigma[ia],
Epi = 1.0, # [ia],
)
)
bunch_out = diffraction.calculateDiffractedComplexAmplitudePhotonBunch(self._crystalpy_diffraction_setup, photons)
bunch_out_dict = bunch_out.toDictionary()
reflectivity_S = numpy.sqrt(numpy.array(bunch_out_dict["intensityS"]))
reflectivity_P = numpy.sqrt(numpy.array(bunch_out_dict["intensityP"]))
#This file extracts knowledge from the document collection.
# The user makes queries about properties of the virus and the functions search the collection for answers.
# They return the sentences that contain the requested properties.
# Approach:
# The user makes two queries:
# a general one and a specific one. The general query gives the abstract topic the user is interested in; the detailed query pins down the exact information.
#Example:
# General query: mortality
# Detailed: mortality rate
# General: hospital
# Detailed: icu duration
# There are two collections of documents (before and after May)
# The default collection is the one after May
# The user can select to search either collection
# In case not enough information is generated from one collection, the user can select to search both collections
# METHODOLOGY
# For each cluster in both collections the aggregated TF-IDF of every keyword is calculated.
# For every general query, the two clusters with the highest aggregated TF-IDF (on the query keywords) are selected.
# In addition, the topic keywords extracted by LDA for every cluster are also explored.
# If a query matches a topic keyword, then the cluster with the topic keyword is also selected
#Then, within the selected clusters,
#sentences which include the detailed query are returned
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import sparse
import os
from sklearn.manifold import TSNE
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
import spacy
import en_core_sci_lg
from spacy.lang.en.stop_words import STOP_WORDS
import re
from tqdm.auto import tqdm
from sklearn.model_selection import GridSearchCV
import pickle
import functools
df1 = pd.read_csv('D://Uni_Stuff/complex data project/df_final111.csv')
df2 = pd.read_csv('D:/Uni_Stuff/complex data project/df_final22.csv')
pkl_file = open('D:/Uni_Stuff/COVID-19/keywords111.pkl', 'rb')
keywords1 = pickle.load(pkl_file)
pkl_file = open('D:/Uni_Stuff/COVID-19/keywords22.pkl', 'rb')
keywords2 = pickle.load(pkl_file)
pkl_file = open('D:/Uni_Stuff/COVID-19/keywords1.pkl', 'rb')
keywords11 = pickle.load(pkl_file)
#Aggregated TF-iDF
#Create a tf-idf vectorizer for every cluster and add the sums
#First
#Splitting dfs based on their cluster
clustered_dfs1 = []
clusters1 = len(df1['cluster'].unique())
for i in range(clusters1):
new_df = df1[df1['cluster'] == i]
clustered_dfs1.append(new_df)
clustered_dfs2 = []
clusters2 = len(df2['cluster'].unique())
for i in range(clusters2):
new_df = df2[df2['cluster'] == i]
clustered_dfs2.append(new_df)
#Compute the tf-idf scores for both collections and for every cluster within them
#Defining the vectorizer
stopwords = list(STOP_WORDS)
extra_stop_words = ['doi', 'preprint', 'copyright', 'org', 'https', 'et', 'al', 'author', 'figure', 'table', 'rights',
'reserved', 'permission', 'use', 'used', 'using', 'biorxiv', 'medrxiv', 'license', 'fig', 'fig.', 'al.', 'cite', 'ade', 'apn',
'Elsevier', 'PMC', 'CZI', 'ct', 'licence', 'author', 'doi', 'study', '-PRON-', 'usually', 'covid', 'sars', 'patient',
'human', 'coronavirus']
for word in extra_stop_words:
if word not in stopwords:
stopwords.append(word)
model = en_core_sci_lg.load(disable=["tagger", "ner", "parser"])
#Disabling tagger, ner and parser as I don't care about tagging parts of speech or named entities
#Maximum text length for the tokenizer
model.max_length = 3000000
def custom_tokenizer(sentence):
#Replacing all punctuation with a space
sentence =re.sub(r'[^\w\s]',' ', sentence)
#Splitting numbers from words so they can be removed by the tokenizer: for example 7ac
sentence = " ".join(re.split(r'(\d+)', sentence))
#Applying the pipeline
sentence = model(sentence)
#Lemmatizing and lowercasing
#Removing stopwords, numbers, punctuation, spaces and single-letter words
tokens = [word.lemma_.lower() for word in sentence if ((str(word) not in stopwords) and (word.is_punct==False) and
(len(str(word))!=1) and (word.like_num==False) and (word.is_space==False))]
return(tokens)
#Creating a vectorizer for every dataset collection
#Recompute the tf-idf scores here, since loading the precomputed ones was time-consuming
vocabulary = df1['text']
#Fitting
vectorizer = TfidfVectorizer(tokenizer = custom_tokenizer , min_df = 20) #Might change min_df to 1 if no results are picked
tqdm.pandas()
text_vectorized1 = vectorizer.fit_transform(tqdm(vocabulary))
#Moving to the second paper collection
vocabulary = df2['text']
#Fitting
vectorizer1 = TfidfVectorizer(tokenizer = custom_tokenizer , min_df = 20) #Might change min_df to 1 if no results are picked
tqdm.pandas()
text_vectorized2 = vectorizer1.fit_transform(tqdm(vocabulary))
#Getting the tf-idf scores for every term on the two collections
vectorized_df1 = pd.DataFrame(columns = vectorizer.get_feature_names() , data = text_vectorized1.toarray())
sums = np.array([np.sum(vectorized_df1[j]) for j in vectorized_df1.columns])
tf_idf_scores1 = pd.DataFrame(index = vectorized_df1.columns , data = sums).rename(columns = {0:'Tf-IDF'})
tf_idf_scores1 = tf_idf_scores1.sort_values('Tf-IDF' , ascending = False)
tf_idf_scores1 = tf_idf_scores1.reset_index().rename(columns = {'index':'token'})
#Getting the tf-idf scores for every term on the two collections
vectorized_df2 = pd.DataFrame(columns = vectorizer1.get_feature_names() , data = text_vectorized2.toarray())
sums = np.array([np.sum(vectorized_df2[j]) for j in vectorized_df2.columns])
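# The file is cut off before the cluster-selection step described in the
# METHODOLOGY comments above. Below is a hypothetical sketch (not from the
# original source) of how a general query might pick the two clusters with the
# highest aggregated TF-IDF; `select_clusters` and its arguments are assumed
# names, with per-cluster score tables shaped like tf_idf_scores1 above.
def select_clusters(general_query, cluster_scores, n_best=2):
    # cluster_scores: list of DataFrames, one per cluster, with 'token' and 'Tf-IDF' columns
    totals = []
    for i, scores in enumerate(cluster_scores):
        mask = scores['token'].isin(general_query.lower().split())
        totals.append((i, scores.loc[mask, 'Tf-IDF'].sum()))
    totals.sort(key=lambda pair: pair[1], reverse=True)
    return [i for i, _ in totals[:n_best]]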
#
from positive import *
# Reference factorial from scipy (moved from scipy.misc to scipy.special in newer scipy versions)
try:
from scipy.special import factorial
except ImportError:
from scipy.misc import factorial
# Smooth 1D data
class smooth:
'''
Smooth 1D data. Initially based on https://stackoverflow.com/questions/20618804/how-to-smooth-a-curve-in-the-right-way
'''
# Class constructor
def __init__(this,y,width=None,method=None,auto_method=None,polynomial_order=2):
# Import useful things
from numpy import ones,convolve,mod,hstack,arange,cumsum,mod,array
# Handle method input; set default
method = 'savgol' if method is None else method.lower()
# Handle n input; default is None which causes method to be auto
method = 'auto' if width is None else method
# Store relevant inputs to this object
this.scalar_range = array(y)
this.width = width
this.method = method
# Handle different methods
if method in ('average','avg','mean'):
# Use rolling average (non-convolutional)
y_smooth = this.__rolling_average__(width)
elif method == 'savgol':
# Automatically determine best smoothing length to use with average
y_smooth = this.__savgol__(width=width,polynomial_order=polynomial_order)
elif method in ('auto','optimal'):
# Automatically determine best smoothing length to use with average
y_smooth = this.__auto_smooth__(method=auto_method)
else:
error('unknown smoothing method requested: %s'%red(method))
#
this.answer = y_smooth
# Smooth using savgol filter from scipy
def __savgol__(this,width=None,polynomial_order=2):
# Import usefuls
from scipy.signal import savgol_filter as savgol
from numpy import mod,ceil
# Handle inputs
if width is None: width = max( int(ceil( len(this.scalar_range)/10.0 )), polynomial_order+1 )
if not isinstance(width,int):
error('width must be int')
if width<(polynomial_order+1):
width += 2
if not mod(width,2):
width += 1
#
# print '>> ',width,polynomial_order
ans = savgol( this.scalar_range, width, polynomial_order )
return ans
# Smooth using moving average of available pionts
def __rolling_average__(this,width):
# Import useful things
from numpy import ones,mod,array
''' Use a rolling average '''
# NOTE: I tried using convolution, but it didn't handle general boundary conditions well; so I wrote my own algo
if width > 0:
width = int(width+mod(width,2))//2
z = array(this.scalar_range)
for k in range(len(z)):
#
a = max(0,k-width)
b = min(len(this.scalar_range),k+width)
s = min( k-a, b-k )
a,b = k-s,k+s
z[k] = sum( this.scalar_range[a:b] ) / (b-a) if b>a else this.scalar_range[k]
else:
z = this.scalar_range
#
ans = z
return ans
# Automatically determine best smoothing length to use with average
def __auto_smooth__(this,method=None):
'''Automatically determine best smoothing length to use with average'''
# Import useful things
from numpy import ones,convolve,mod,hstack,arange,cumsum,mod,array,mean
from numpy import poly1d,polyfit,std,argmin
#
if method is None: method='savgol'
#
err,smo = [],[]
width_range = array(list(range(5,min(50,int(len(this.scalar_range)/2)))))
# print lim(width_range)
if method=='savgol':
mask = mod(width_range,2).astype(bool)
width_range = width_range[ mask ]
#
for j,k in enumerate(width_range):
smo.append( smooth(this.scalar_range,int(k),method=method).answer )
dif = this.scalar_range - smo[-1]
# err.append( -mean( dif ) if method=='savgol' else std(dif)/std(this.scalar_range) )
err.append( -mean( dif ) )
#
modeled_err = poly1d( polyfit(width_range,err,2) )(width_range)
k = argmin( modeled_err )
best_width = int( width_range[k] if k>0 else 3 )
# print 'best width = ',width_range[k]
#
y_smooth = smooth(this.scalar_range,best_width,method=method).answer
#
this.raw_error = err
this.modeled_error = modeled_err
this.trial_answers = smo
this.width_range = width_range
this.width = best_width
#
ans = y_smooth
return ans
# Plotting function
def plot(this):
# Import useful things
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 0.8
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 16
from matplotlib.pyplot import plot,figure,title,xlabel,ylabel,legend,subplots,gca,sca,xlim,title,subplot
from numpy import array,arange,argmin
#
if this.method == 'auto':
#
fsz = 1.2*array([12,4])
fig1 = figure( figsize=fsz )
subplot(1,2,1)
plot( this.scalar_range,'ok',alpha=0.5)
xlim( lim(arange(len(this.scalar_range))) )
clr = rgb( len(this.width_range), jet=True, reverse=True )
for j,k in enumerate(this.width_range):
plot( this.trial_answers[j], color = clr[j], alpha=0.2 )
#
plot( this.answer, '-k' )
xlabel('$x$')
ylabel('$y(x)$')
title('Method = "%s"'%this.method)
#
subplot(1,2,2)
plot( this.width_range, this.raw_error, 'k', alpha=0.5 )
plot( this.width_range, this.modeled_error, 'g' )
k = argmin( this.modeled_error )
best_n = this.width_range[k] if k>0 else 0
plot( this.width_range[k], this.modeled_error[k], 'og', mec='none' )
xlim( lim(this.width_range) )
xlabel('$x$')
ylabel('error for $y(x)$')
title('Smoothed with $width = %d$'%this.width)
else:
fsz = 1.2*array([6,4])
fig = figure( figsize=fsz )
#
x = arange(len(this.scalar_range))
y = this.scalar_range
plot(x, y,'ok',alpha=0.3,label='Input Data')
plot(x, this.answer, 'r', label='Smoothed Data' )
xlim( lim(x) )
xlabel('$x$')
ylabel('$y(x)$')
legend(frameon=False)
title('Smoothed with $width = %d$'%this.width)
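# Hypothetical usage sketch for the smooth class (not from the original
# source): smooth a noisy sine with the savgol method and a width of 11 samples.
def _smooth_demo():
    from numpy import linspace, sin, pi
    from numpy.random import randn
    y = sin(linspace(0, 2*pi, 200)) + 0.1*randn(200)
    return smooth(y, width=11, method='savgol').answer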
# Given an array, return a processed array such that, from 0 to k, the value of the array takes on the maximum value on [0,k]. The result is monotonic. NOTE that this function is useful for feature selection.
def upbow(a):
'''
Given an array, return a processed array such that, from 0 to k, the value of the array takes on the maximum value on [0,k]. The result is monotonic. NOTE that this function is useful for feature selection.
~llondon
'''
from numpy import ndarray,array
if not isinstance(a,ndarray):
error('input must be ndarray, instead it\'s %s'%(type(a).__name__))
b = a.copy()
u = a[0]
for k,v in enumerate(a):
b[k] = max(u,a[k])
u = b[k]
return b
# [Deprecated???] custom function for setting desirable ylimits
def pylim( x, y, axis='both', domain=None, symmetric=False, pad_y=0.1 ):
'''Try to automatically determine nice xlim and ylim settings for the current axis'''
#
from matplotlib.pyplot import xlim, ylim
from numpy import ones
#
if domain is None:
mask = ones( x.shape, dtype=bool )
else:
mask = (x>=min(domain))*(x<=max(domain))
#
if axis == 'x' or axis == 'both':
xlim( lim(x) )
#
if axis == 'y' or axis == 'both':
limy = lim(y[mask]); dy = pad_y * ( limy[1]-limy[0] )
if symmetric:
ylim( [ -limy[-1]-dy , limy[-1]+dy ] )
else:
ylim( [ limy[0]-dy , limy[-1]+dy ] )
# Calculate the positive definite representation of the input's complex phase
def anglep(x):
'''Calculate the positive definite representation of the input's complex phase '''
from numpy import angle,amin,pi,exp,amax
#
initial_shape = x.shape
x_ = x.reshape( (x.size,) )
#
x_phase = angle(x_)
C = 2*pi # max( abs(amin(x_phase)), abs(amax(x_phase)) )
x_phase -= C
for k,y in enumerate(x_phase):
while y < 0:
y += 2*pi
x_phase[k] = y
return x_phase.reshape(initial_shape)+C
# Sort an array, unwrap it, and then reimpose its original order
def sunwrap( a ):
''' Sort an array, unwrap it, and then reimpose its original order '''
# Import useful things
from numpy import unwrap,array,pi,amin,amax,isnan,nan,isinf,isfinite,mean
# Flatten array by size
true_shape = a.shape
b = a.reshape( (a.size,) )
# Handle non finites
nanmap = isnan(b) | isinf(b)
b[nanmap] = -200*pi*abs(amax(b[isfinite(b)]))
# Sort
chart = sorted( list(range(len(b))) ,key=lambda c: b[c])
# Apply the sort
c = b[ chart ]
# Unwrap the sorted
d = unwrap(c)
d -= 2*pi*( 1 + int(abs(amax(d))) )
while amax(d)<0:
d += 2*pi
# Re-order
rechart = sorted( list(range(len(d))) ,key=lambda r: chart[r])
# Restore non-finites
e = d[ rechart ]
e[nanmap] = nan
#
f = e - mean(e)
pm = mean( f[f>=0] )
mm = mean( f[f<0] )
while pm-mm > pi:
f[ f<0 ] += 2*pi
mm = mean( f[f<0] )
f += mean(e)
# Restore true shape and return
return f.reshape( true_shape )
# from numpy import unwrap
# return unwrap(a)
#
def sunwrap_dev(X_,Y_,Z_):
'''Given x,y,z unwrap z using x and y as coordinates'''
#
from numpy import unwrap,array,pi,amin,amax,isnan,nan
from numpy import sqrt,isinf,isfinite,inf
from numpy.linalg import norm
#
true_shape = X_.shape
X = X_.reshape( (X_.size,) )
Y = Y_.reshape( (Y_.size,) )
Z = Z_.reshape( (Z_.size,) )
#
threshold = pi
#
skip_dex = []
for k,z in enumerate(Z):
#
if isfinite(z) and ( k not in skip_dex ):
#
x,y = X[k],Y[k]
#
min_dr,z_min,j_min = inf,None,None
for j,zp in enumerate(Z):
if j>k:
dr = norm( [ X[j]-x, Y[j]-y ] )
if dr < min_dr:
min_dr = dr
j_min = j
z_min = zp
#
if z_min is not None:
skip_dex.append( j_min )
dz = z - z_min
if dz < threshold:
Z[k] += 2*pi
elif dz> threshold:
Z[k] -= 2*pi
#
ans = Z.reshape( true_shape )
#
return ans
# Useful identity function of two inputs --- this is here because pickle cannot store lambdas in python < 3
def IXY(x,y): return y
# Rudimentary single point outlier detection based on cross validation of statistical moments
# NOTE that this method is to be used sparingly. It was developed to help extrapolate NR data to infinity
def single_outsider( A ):
'''Rudimentary outlier detection based on cross validation of statistical moments'''
# Import useful things
from numpy import std,array,argmin,ones,mean
#
true_shape = A.shape
#
a = array( abs( A.reshape( (A.size,) ) ) )
a = a - mean(a)
#
std_list = []
for k in range( len(a) ):
#
b = [ v for v in a if v!=a[k] ]
std_list.append( std(b) )
#
std_arr = array(std_list)
#
s = argmin( std_arr )
# The OUTSIDER is the data point that, when taken away, minimizes the standard deviation of the population.
# In other words, the outsider is the point that adds the most diversity.
mask = ones( a.shape, dtype=bool )
mask[s] = False
mask = mask.reshape( true_shape )
# Return the outsider's location and a mask to help locate it within related data
return s,mask
# Return the min and max limits of a 1D array
def lim(x,dilate=0):
'''
Return the min and max limits of a 1D array.
INPUT
---
x, ndarray
dilate=0, fraction of max-min by which to expand or contract output
RETURN
---
array with [min(x),max(x)]
'''
# Import useful bit
from numpy import array,amin,amax,ndarray,diff
# ensure is array
if not isinstance(x,ndarray): x = array(x)
# Flatten input into a 1D array.
z = x.reshape((x.size,))
#
ans = array([min(z),max(z)]) + (0 if len(z)>1 else array([-1e-20,1e-20]))
#
if dilate != 0: ans += diff(ans)*dilate*array([-1,1])
# Return min and max as list
return ans
# Determine whether numpy array is uniformly spaced
def isunispaced(x,tol=1e-5):
# import useful functions
from numpy import diff,amax
# If x is not a numpy array, then let the people know.
if not type(x).__name__=='ndarray':
msg = '(!!) The first input must be a numpy array of 1 dimension.'
raise ValueError(msg)
# Return whether the input is uniformly spaced
return amax(abs(diff(x,2)))<tol
# Calculate frequency domain (~1/t Hz) given time series array
def getfreq( t, shift=False ):
#
from numpy.fft import fftfreq, fftshift
from numpy import diff,allclose,mean
# If t is not a numpy array, then let the people know.
if not type(t).__name__=='ndarray':
msg = '(!!) The first input must be a numpy array of 1 dimension.'
raise ValueError(msg)
# If nonuniform time steps are found, then let the people know.
if not isunispaced(t):
msg = '(!!) The time input (t) must be uniformly spaced.'
raise ValueError(msg)
#
if shift:
f = fftshift( fftfreq( len(t), mean(diff(t)) ) )
else:
f = fftfreq( len(t), mean(diff(t)) )
#
return f
# Low level function for fixed frequency integration (FFI)
def ffintegrate(t,y,w0,n=1):
# This function is based upon 1006.1632v1 Eq 27
#
from numpy import array,allclose,ones,pi
from numpy.fft import fft,ifft,fftfreq,fftshift
from numpy import where
# If x is not a numpy array, then let the people know.
if not type(y).__name__=='ndarray':
msg = '(!!) The second input must be a numpy array of 1 dimension.'
error(msg)
# If nonuniform time steps are found, then let the people know.
if not isunispaced(t):
msg = '(!!) The time input (t) must be uniformly spaced.'
raise ValueError(msg)
# Define the lowest level main function which applies integration only once.
def ffint(t_,y_,w0=None):
# Note that the FFI method is applied in a DOUBLE SIDED way, under the assumption that w0 is positive
if w0<0: w0 = abs(w0)
# Calculate the fft of the input data, x
f = getfreq(t_) # NOTE that no fftshift is applied
# Replace zero frequency values with very small number
if (f==0).any():
f[f==0] = 1e-9
#
w = f*2*pi
# Find masks for positive an negative fixed frequency regions
mask1 = where( (w>0) * (w<w0) ) # Positive and less than w0
mask2 = where( (w<0) * (w>-w0) ) # Negative and greater than -w0
# Prepare fills for each region of value + and - w0
fill1 = w0 * ones( w[mask1].shape )
fill2 = -w0 * ones( w[mask2].shape )
# Apply fills to the frequency regions
w[ mask1 ] = fill1; w[ mask2 ] = fill2
# Take the FFT
Y_ = fft(y_)
# Calculate the frequency domain integrated vector
Y_int = Y_ / (w*1j)
# Inverse transform, and make sure that the inverse is of the same numerical type as what was input
tol = 1e-8
y_isreal = allclose(y_.imag,0,atol=tol)
y_isimag = allclose(y_.real,0,atol=tol)
if y_isreal:
y_int = ifft( Y_int ).real
elif y_isimag:
y_int = ifft( Y_int ).imag
else:
y_int = ifft( Y_int )
# Share knowledge with the people.
return y_int
#
x = y
for k in range(n):
#
x = ffint(t,x,w0)
#
return x
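# Hypothetical usage sketch for ffintegrate (not from the original source):
# integrate a 10 Hz cosine once with the FFI cutoff below the signal frequency;
# away from the edges the result should approximate sin(2*pi*10*t)/(2*pi*10).
def _ffintegrate_demo():
    from numpy import linspace, cos, pi
    t = linspace(0, 1, 4096)
    y = cos(2*pi*10*t)
    return ffintegrate(t, y, 2*pi*5, n=1)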
# Derivative function that preserves array length: [(d/dt)^n y(t)] is returned
def intrp_diff( t, # domain values
y, # range values
n = 1 ): # degree of derivative
#
from numpy import diff,append
from scipy.interpolate import InterpolatedUnivariateSpline as spline
if 1 == n :
#
dt = t[1]-t[0]
dy = diff(y)/dt
dy_left = append( dy, spline( t[:-1], dy )(t[-1]) )
dy_right = append( spline( t[:-1], dy )(t[0]-dt), dy )
dy_center = 0.5 * ( dy_left + dy_right )
return dy_center
elif n > 1:
#
dy = intrp_diff( t, y )
return intrp_diff( t, dy, n-1 )
elif n == 0 :
#
return y
# Find peaks adaptation from Matlab. Yet another example of recursion's power!
def findpeaks( y, min_distance = None ):
'''
Find the indices and values of the input vector's local maxima.
INPUT
--
y numpy 1D array of reals
min_distance = None minimum allowed distance between consecutive peaks
OUTPUT
--
pks peak values
locs indices of peaks
Algorithm copied from Matlab's findLocalMaxima within findpeaks.m
<EMAIL>.org
'''
#
from numpy import array,ones,append,arange,inf,isfinite,diff,sign,ndarray,hstack,where,abs
import warnings
#
thisfun = inspect.stack()[0][3]
if min_distance is None:
#
if not isinstance(y,ndarray):
msg = red('Input must be numpy array')
error(msg,thisfun)
# bookend y by inf and make index vector
yTemp = hstack( [ inf, y, inf ] )
iTemp = arange( len(yTemp) )
# keep only the first of any adjacent pairs of equal values (including NaN).
yFinite = isfinite(yTemp)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
iNeq = where( ( abs(yTemp[1:]-yTemp[:-1])>1e-12 ) * ( yFinite[:-1]+yFinite[1:] ) )
iTemp = iTemp[ iNeq ]
# take the sign of the first sample derivative
s = sign( diff( yTemp[iTemp] ) )
# find local maxima
iMax = where(diff(s)<0)
# find all transitions from rising to falling or to NaN
iAny = 1 + array( where( s[:-1]!=s[1:] ) )
# index into the original index vector without the NaN bookend.
iInflect = iTemp[iAny]-1
iPk = iTemp[iMax]
# NOTE that all inflection points are found, but not used here. The function may be updated in the future to make use of inflection points.
# Package outputs
locs = iPk
pks = y[locs]
else:
#
pks,locs = findpeaks(y)
done = min( diff(locs) ) >= min_distance
pks_ = pks
c = 0
while not done:
#
pks_,locs_ = findpeaks(pks_)
print('length is %i' % len(locs_))
#
if len( locs_ ) > 1 :
#
locs = locs[ locs_ ]
pks = pks[ locs_ ]
#
done = min( diff(locs_) ) >= min_distance
else:
#
done = True
#
c+=1
print(c)
#
return pks,locs
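# Hypothetical usage sketch for findpeaks (not from the original source): the
# three local maxima of a sawtooth-like series are recovered in order.
def _findpeaks_demo():
    from numpy import array
    y = array([0., 1., 0., 2., 0., 3., 0.])
    pks, locs = findpeaks(y)
    # expect pks -> [1., 2., 3.] at locs -> [1, 3, 5]
    return pks, locs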
# Find the roots of a discrete array.
def findroots( y ):
from numpy import array,arange,allclose
n = len(y)
w =[]
for k in range(n):
#
l = min(k+1,n-1)
#
if y[k]*y[l]<0 and abs(y[k]*y[l])>1e-12:
#
w.append(k)
elif allclose(0,y[k],atol=1e-12) :
#
w.append(k)
#
root_mask = array( w )
#
return root_mask
# Clone of MATLAB's find function: find all of the elements in a numpy array that satisfy a condition.
def find( bool_vec ):
#
from numpy import where
#
return where(bool_vec)[0]
# Low level function that takes in numpy 1d array, and index locations of start and end of window, and then outputs the taper (a hanning taper). This function does not apply the taper to the data.
def maketaper(arr,state,window_type='hann',ramp=True):
'''
Low level function that takes in numpy 1d array, and index locations of start and end of window, and then outputs the taper (a hanning taper). This function does not apply the taper to the data.
For all window types allowed, see:
https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.get_window.html
'''
# Import useful things
from numpy import ones,zeros
from numpy import hanning as hann
from scipy.signal import get_window
# Validate inputs
for k in state:
if k+1 > len(arr):
error('state incompatible with array dimensions: the array shape is %s, but the state is %s'%(yellow(str(arr.shape)),yellow(str(state))) )
# Parse taper state
a = state[0]
b = state[-1]
#
use_nr_window = window_type == 'nr'
# Only proceed if a valid taper is given
proceed = True
true_width = abs(b-a)
#
if ramp:
if window_type == 'nr':
#
twice_ramp = nrwindow(2*true_width)
elif window_type == 'exp':
#
twice_ramp = expsin_window(2*true_width)
else:
#
twice_ramp = get_window( window_type, 2*true_width )
if b>a:
true_ramp = twice_ramp[ :true_width ]
elif b<=a:
true_ramp = twice_ramp[ true_width: ]
else:
proceed = False
print(a,b)
alert('Malformed taper state: start and end indices must differ')
else:
print('ramp is false')
if window_type == 'nr':
true_ramp = nrwindow(true_width)
elif window_type == 'exp':
true_ramp = expsin_window(true_width)
else:
true_ramp = get_window( window_type,true_width )
# Proceed (or not) with tapering
taper = ones( len(arr) ) if ramp else zeros( len(arr) )
if proceed:
# Make the taper
if b>a:
taper[ :min(state) ] = 0*taper[ :min(state) ]
# print state, state[1]-state[0], taper.shape, true_ramp.shape, taper[ min(state) : max(state) ].shape
taper[ min(state) : max(state) ] = true_ramp
else:
taper[ max(state): ] = 0*taper[ max(state): ]
taper[ min(state) : max(state) ] = true_ramp
#
if len(taper) != len(arr):
error('the taper length is inconsistent with input array')
#
return taper
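# Hypothetical usage sketch for maketaper (not from the original source):
# ramp the first 100 samples of a series up from zero with a hann window.
def _maketaper_demo():
    from numpy import ones
    arr = ones(1000)
    taper = maketaper(arr, (0, 100))   # hann ramp on samples 0..100, ones after
    return arr * taper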
# <NAME> 6/27/2012
# modifications by spxll'16
# conversion to python by spxll'16
def diff5( time, ff ):
#
from numpy import var,diff,zeros
# check that time and func are the same size
if len(time) != len(ff) :
error('time and function arrays are not the same size.')
# check that dt is fixed:
if var(diff(time))<1e-8 :
dt = time[1] - time[0]
tindmax = len(time)
else:
error('Time step is not uniform.')
# pre-allocate the output array
deriv = zeros(tindmax)
# fourth order one-sided stencils at the boundaries (0-based indexing):
deriv[0] = ( -3.0*ff[4] + 16.0*ff[3] - 36.0*ff[2] + 48.0*ff[1] - 25.0*ff[0] )/(12.0*dt)
deriv[1] = ( ff[4] - 6.0*ff[3] + 18.0*ff[2] - 10.0*ff[1] - 3.0*ff[0] )/(12.0*dt)
deriv[-2] = ( 3.0*ff[-1] + 10.0*ff[-2] - 18.0*ff[-3] + 6.0*ff[-4] - ff[-5])/(12.0*dt)
deriv[-1] = ( 25.0*ff[-1] - 48.0*ff[-2] + 36.0*ff[-3] - 16.0*ff[-4] + 3.0*ff[-5])/(12.0*dt)
# fourth order central difference at the interior:
deriv[2:-2] = ( -ff[4:] + 8.0*ff[3:-1] - 8.0*ff[1:-3] + ff[:-4] ) / (12.0*dt)
#
return deriv
# Simple combinatoric function -- number of ways to select k of n when order doesn't matter
def nchoosek(n,k): return factorial(n)/(factorial(k)*factorial(n-k))
# High level function for spin weighted spherical harmonics
def sYlm(s,l,m,theta,phi,return_mesh=False):
# Import useful things
from numpy import array,vstack,ndarray,exp,double
# Enforce that theta and phi are arrays
phi = array( phi if isinstance(phi ,(list,tuple)) else [double(phi )] ) if not isinstance(phi ,ndarray) else phi
theta = array( theta if isinstance(theta,(list,tuple)) else [double(theta)] ) if not isinstance(theta,ndarray) else theta
#
theta_is_matrix = len(theta.shape)>1
phi_is_matrix = len(phi.shape)>1
if theta_is_matrix or phi_is_matrix :
error('theta and phi inputs must not have dimension greater than 1')
# Define function to encapsulate azimuthal dependence
Am = lambda M,PHI: exp( 1j*M*PHI )
# IF more than one phi value is given
if len(phi)>1 :
D = sDlm(s,l,m,theta)
Y = vstack( [ D * Am(m,ph) for ph in phi ] )
else: # ELSE if a single value is given
Y = sDlm(s,l,m,theta) * Am(m,phi)
#
if not return_mesh:
return Y
else:
from numpy import meshgrid
THETA,PHI = meshgrid(theta,phi)
return Y,THETA,PHI
# Use formula from wikipedia to calculate the harmonic
# See http://en.wikipedia.org/wiki/Spin-weighted_spherical_harmonics#Calculating
# for more information.
def sDlm(s,l,m,theta):
#
from numpy import pi,ones,sin,tan,exp,array,double,sqrt,zeros,ones_like
try:
from scipy.special import factorial,comb
except ImportError:
from scipy.misc import factorial,comb
#
if isinstance(theta,(float,int,double)): theta = [theta]
theta = array(theta)
#
theta = array([ double(k) for k in theta ])
# Ensure regular output (i.e. no nans)
theta[theta==0.0] = 1e-9
# Name anonymous functions for cleaner syntax
f = lambda k: double(factorial(k))
c = lambda x: double(comb(x[0],x[1]))
cot = lambda x: 1.0/double(tan(x))
# Pre-allocation array for calculation (see usage below)
X = ones_like( theta )
# Calcualte the "pre-sum" part of sYlm
a = (-1.0)**(m)
a = a * sqrt( f(l+m)*f(l-m)*(2.0*l+1) )
a = a / sqrt( 4.0*pi*f(l+s)*f(l-s) )
a = a * sin( theta/2.0 )**(2.0*l)
A = a * X
# Calcualte the "sum" part of sYlm
B = zeros(theta.shape)
for k in range(len(theta)):
B[k] = 0
for r in range(l-s+1):
if (r+s-m <= l+s) and (r+s-m>=0) :
a = c([l-s,r])*c([l+s,r+s-m])
a = a * (-1)**(l-r-s)
a = a * cot( theta[k]/2.0 )**(2*r+s-m)
B[k] = B[k] + a
# Calculate final output array
D = A*B
#
if (sum(abs(D.imag)) <= 1e-7).all():
D = D.real
#
return D
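# Hypothetical usage sketch for sYlm (not from the original source): evaluate
# the s=-2, l=2, m=2 spin weighted harmonic on a theta grid at phi=0.
def _sylm_demo():
    from numpy import linspace, pi
    theta = linspace(1e-4, pi - 1e-4, 64)
    return sYlm(-2, 2, 2, theta, 0.0)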
# Time shift array data, h, using a frequency domain method
def tshift( t, # time series of data
h, # data that will be shifted
t0, # time by which to shift the data
verbose=False, # Toggle to let the people know
method=None ): # method to use for the shift
# Import usefuls
from scipy.fftpack import fft, fftfreq, fftshift, ifft
from numpy import diff,mean,exp,pi
# Determine if the data is all real
is_real = sum( h.imag ) == 0
#
if verbose and is_real: alert( 'The data are real valued.' )
#
if method is None:
method = 'fft'
if verbose: alert('Using the default time shifting method.')
#
if verbose: alert('The method is "%s"'%yellow(method))
# Apply the time shift
if method.lower() == 'fft':
# take fft of input
H = fft(h)
# get frequency domain of H in hertz (non-monotonic,
# i.e. not the same as the "getfrequencyhz" function)
dt = mean(diff(t))
f = fftfreq( len(t), dt )
# shift, and calculate ifft
H_ = H * exp( -2*pi*1j*t0*f )
#
if is_real:
h_ = ifft( H_ ).real
else:
h_ = ifft( H_ ) # ** here, errors in ifft process are ignored **
elif method.lower() in ('td','index','ind','roll'):
# Use index shifting
if verbose:
alert('Note that this method assumes the data are equally spaced in time.')
#
from numpy import roll
di = int( t0/mean(diff(t)) )
h_ = roll(h, di)
else:
error('unhandled method for time shifting')
# Return the answer
return h_
# Time shift array data, h, using a index shifting method
def ishift( h, di ):
#
from numpy import roll
return roll(h,di)
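# Hypothetical usage sketch for tshift (not from the original source): shift a
# Gaussian pulse by 0.25 time units with the default fft method; the result
# should approximate exp(-(t-0.25)**2) away from the boundaries.
def _tshift_demo():
    from numpy import linspace, exp
    t = linspace(-4, 4, 1024)
    h = exp(-t**2)
    return tshift(t, h, 0.25)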
# Find the interpolated global max location of a data series
def intrp_max( y, domain=None, verbose=False, return_argmax=False, plot = False, pad = 3, ref_index=None ):
#
from scipy.interpolate import UnivariateSpline as spline
from scipy.optimize import minimize
from numpy import allclose,linspace,argmax,arange,hstack,diff,argmax,argmin,mod,array,mean,std
#
PLOT = plot
if PLOT: from matplotlib.pyplot import plot,show,xlim,ylim,xlabel,ylabel,title,figure
#
t = arange(len(y)) if domain is None else domain
# Determine if y is flat
c = (y - mean(y))/std(y)
# the centered version of y, c, is determined to be flat if the largest difference is small
y_is_flat = allclose( y, y[::-1], rtol=1e-3 ) and (std(diff(y)/diff(lim(y))))<1e-3
'''
If the input vector is flat, simply take its numerical max.
Otherwise, use the intrp_max algorithm.
'''
# IF THE INPUT IS NOT FLAT
if not y_is_flat:
#
if PLOT:
#
from positive import rgb
ts = linspace( min(t), max(t), 200 )
ys = spline(t,y,s=0,k=4)(ts)
#
clr= rgb(3)
#
fig1 = figure()
plot( t,y, 'ok' )
plot( ts,ys, color=clr[0], linestyle='--' )
#
dy = diff( lim(y) )*0.1
ylim( array([-1,1])*dy + lim(y) )
xlim( lim(t) )
#
xlabel('domain')
ylabel('range')
#
k_max = argmax( y )
if ref_index: k_max = ref_index
t_max = t[k_max]
y_max = y[k_max]
#
if PLOT:
plot( t_max, y_max, 'o', mfc='none', mec='k', ms=16 )
# Determine points to right and left of numerical max
# This many points to right and left of numerical max will be taken
pad = pad
#
a = k_max - pad
b = k_max + pad
#
left = arange( a, k_max )
right = arange( k_max, b+1 )
#
raw_space = hstack( [left,right] )
#
space = mod( raw_space, len(y)-1 )
#
raw_kspace = list(range( len(space)))
#
if PLOT:
plot( t[ space[0] ], y[ space[0] ], '>', mfc='none', mec='g', ms = 19 )
plot( t[ space[-1] ], y[ space[-1] ], '<', mfc='none', mec='g', ms = 19 )
#
raw_suby = array( [ y[k] for k in space ] ) # y[space]
# -------------------------------------------- #
# Enforce adjacent symmetry about numerical max
# -------------------------------------------- #
left_k = 1 + argmin( abs(raw_suby[0] - raw_suby[1:]) )
right_k = argmin( abs(raw_suby[-1] - raw_suby[:-1]) )
center_k = argmax(raw_suby)
# print left_k, right_k, center_k
#
if PLOT:
fig2 = figure()
plot( raw_kspace, raw_suby, 'ok' )
# IF the closest point is on the other side of the peak AND an asymmetry is detected
# THEN make more symmetric by removing points from left or right
mask = list(range( len(raw_suby)))
if (right_k < center_k): # and (left_k != len(raw_suby)-1) :
mask = list(range( right_k, len(raw_suby)))
elif (left_k > center_k): # and (right_k != 0) :
mask = list(range( 0, left_k+1))
# Apply the mask
kspace = array([ raw_kspace[v] for v in mask ])
suby = array([ raw_suby[v] for v in mask ])
# -------------------------------------------- #
# Interpolate local space to estimate max
# -------------------------------------------- #
try:
intrp_suby = spline( kspace, suby, k=4, s=0 )
except:
warning('Interpolative max failed. Using index.')
#
arg_max = argmax(y)
max_val = y[arg_max]
if return_argmax:
ans = (max_val,float(arg_max))
else:
ans = max_val
return ans
# Location of the max is determined analytically, given the local spline model
kspace_maxes = intrp_suby.derivative().roots()
try:
kspace_max = kspace_maxes[ argmax( intrp_suby(kspace_maxes) ) ]
except:
warning('something\'s wrong folks ....')
print(kspace_maxes)
from matplotlib import pyplot as pp
pp.figure()
from numpy import isnan
print(sum(isnan(y)))
pp.plot( kspace, suby, '-o' )
pp.title( diff(lim(c)) )
pp.show()
raise
#
if PLOT:
#
plot( kspace_max, intrp_suby(kspace_max), '*', ms=20, mec=clr[-1], mfc=clr[-1] )
kspace_sm = linspace(min(kspace),max(kspace))
plot( kspace_sm, intrp_suby(kspace_sm), color=clr[0], linestyle='--' )
plot( kspace, suby, 'ow', ms=4 )
#
dy = diff( lim(suby) )*0.2
ylim( array([-1,1])*dy + lim(raw_suby) )
xlim( lim(raw_kspace) )
xlabel('mapped index domain')
ylabel('wrapped range')
max_val = intrp_suby(kspace_max)
index_arg_max = spline( raw_kspace, raw_space, k=1, s=0 )(kspace_max)
arg_max = spline( list(range(len(t))), t )( index_arg_max )
#
if verbose:
print('\n>> Results of intrp_max:\n%s' % ( '--'*20 ))
print(' intrp_max \t = \t %f' % max_val)
print('intrp_arg_max \t = \t %f\n' % arg_max)
#
if PLOT:
figure( fig1.number )
plot( arg_max, max_val, '*', ms=20, mec=clr[-1], mfc=clr[-1] )
else: # IF THE INPUT IS FLAT
#
if verbose: warning('Input is determined to be flat. A simple numerical max will be used.')
arg_max_dex = argmax( y )
if ref_index: arg_max_dex = ref_index
arg_max = t[ arg_max_dex ]
max_val = y[ arg_max_dex ]
#
if return_argmax:
ans = (max_val,float(arg_max))
else:
ans = max_val
#
return ans
# Find the interpolated global argmax location of a data series (wrapper around intrp_max)
def intrp_argmax( y,
domain=None,
plot=False,
ref_index = None,
verbose=False ):
#
max_val,arg_max = intrp_max( y,domain=domain,verbose=verbose,return_argmax=True,plot=plot,ref_index=ref_index )
#
ans = arg_max
return ans
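# Hypothetical usage sketch for intrp_max (not from the original source): the
# max of a sampled parabola is recovered between grid points.
def _intrp_max_demo():
    from numpy import linspace
    x = linspace(0, 1, 11)
    y = -(x - 0.53)**2
    max_val, arg_max = intrp_max(y, domain=x, return_argmax=True)
    # arg_max should be close to 0.53 even though 0.53 is not a grid point
    return max_val, arg_max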
# Find the interpolated global max location of a data series
# NOTE that this version does not localize around numerical max of input; this is a bad thing
def intrp_max_depreciated( y,
domain=None,
verbose=False, return_argmax=False ):
#
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from scipy.optimize import minimize
from numpy import linspace,argmax
#
x = list(range(len(y))) if domain is None else domain
#
yspline = spline( x, y )
# Find the approximate max location in index
k = argmax( y )
# NOTE that we use minimize with bounds as it was found to have better behavior than fmin with no bounding
x0 = x[k]
f = lambda X: -yspline(X)
dx = 0.1*x0
q = minimize(f,x0,bounds=[(max(x0-dx,min(x)),min(x0+dx,max(x)))])
xmax = q.x[0]
#
if yspline(xmax)<max(y):
# warning('yspline(xmax)<max(y): spline optimization failed; now taking numerical max of input series')
maxval = max(y)
else:
maxval = yspline(xmax)
#
if return_argmax:
ans = (maxval,xmax)
else:
ans = maxval
# #
# from matplotlib.pyplot import plot,xlim,ylim,title,show,gca
# plot(x,y,'bo',mfc='none')
# x_ = linspace(min(x),max(x),2e2)
# plot( x_,yspline(x_),'k',alpha=0.5 )
# plot( xmax, yspline(xmax), 'or', mfc='none' )
# show()
#
return ans
#
def expsin_window( N ):
#
from numpy import hstack,array,linspace,exp,log,pi,sin
#
t = log(1e16) * (1+ sin( linspace( pi/2, -pi/2, int(N)//2 ) ))*0.5
A = exp( -t )
A -= min(A)
A /= max(A)
#
ans = hstack( [A, A[list(range(len(A)-1,0,-1))] ] ) if 2*len(A)==N else hstack( [A, A[list(range(len(A)-1,1,-1))] ] )
#
return ans
#
def spline_diff(t,y,k=3,n=1):
'''
Wrapper for InterpolatedUnivariateSpline derivative function
'''
#
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Calculate the desired number of derivatives
ans = spline(t,y.real,k=k).derivative(n=n)(t) \
+ ( 1j*spline(t,y.imag,k=k).derivative(n=n)(t) if isinstance(y[0],complex) else 0 )
return ans
#
def spline_antidiff(t,y,k=3,n=1):
'''
Wrapper for InterpolatedUnivariateSpline antiderivative function
'''
#
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Calculate the desired number of integrals
ans = spline(t,y.real,k=k).antiderivative(n=n)(t) + ( 1j*spline(t,y.imag,k=k).antiderivative(n=n)(t) if isinstance(y[0],complex) else 0 )
# Return the answer
return ans
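# Hypothetical usage sketch (not from the original source): the derivative of
# sin is recovered by spline_diff, and spline_antidiff undoes it (up to the
# integration constant, which is zero here since sin(0)=0).
def _spline_diff_demo():
    from numpy import linspace, sin, pi
    t = linspace(0, 2*pi, 256)
    y = sin(t)
    dy = spline_diff(t, y)            # ~cos(t)
    y_back = spline_antidiff(t, dy)   # ~sin(t)
    return dy, y_back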
# Sinc Interpolation
# from -- https://gist.github.com/endolith/1297227
def sinc_interp(x, s, u):
"""
Interpolates x, sampled at "s" instants
Output y is sampled at "u" instants ("u" for "upsampled")
from Matlab:
http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html
"""
# Import the numpy names used below (these were assumed to be in scope in the original)
from numpy import tile,newaxis,dot,sinc
if len(x) != len(s):
raise Exception('x and s must be the same length')
# Find the period
T = s[1] - s[0]
sincM = tile(u, (len(s), 1)) - tile(s[:, newaxis], (1, len(u)))
y = dot(x, sinc(sincM/T))
return y
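# Hypothetical usage sketch for sinc_interp (not from the original source):
# upsample a band-limited tone from 32 to 256 samples.
def _sinc_interp_demo():
    from numpy import linspace, sin, pi
    s = linspace(0, 1, 32, endpoint=False)    # original sample instants
    u = linspace(0, 1, 256, endpoint=False)   # upsampled instants
    x = sin(2*pi*4*s)                         # 4 Hz tone, well below Nyquist
    return sinc_interp(x, s, u)               # should closely match sin(2*pi*4*u)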
#
def nrwindow( N ):
'''
The point here is to define a taper to be used for the low frequency part of waveforms from NR data samples.
'''
#
from scipy.interpolate import CubicSpline as spline
from numpy import hstack,array,linspace,pi,sin
#
numerical_data = array([ [0.000235599, 0.164826], [0.000471197, 0.140627],\
[0.000706796, 0.139527], [0.000942394, 0.154408],\
[0.00117799, 0.144668], [0.00141359, 0.0820655],\
[0.00164919, 0.107215], [0.00188479, 0.326988],\
[0.00212039, 0.612349], [0.00235599, 0.928147],\
[0.00259158, 1.25567], [0.00282718, 1.61068],\
[0.00306278, 2.05771], [0.00329838, 2.69093],\
[0.00353398, 3.58197], [0.00376958, 4.74465],\
[0.00400517, 6.14815], [0.00424077, 7.76167],\
[0.00447637, 9.66762], [0.00471197, 12.1948],\
[0.00494757, 16.2907], [0.00518317, 23.0923],\
[0.00541877, 33.2385], [0.00565436, 49.4065],\
[0.00588996, 73.3563], [0.00612556, 101.84],\
[0.00636116, 121.165], ])
#
a = numerical_data[:,1]/max(numerical_data[:,1])
n = len(a)
f = linspace(0,1,n)
#
A = spline(f,a)( linspace(0,1,int(N)//2) )
#
ans = hstack( [A, A[list(range(len(A)-1,0,-1))] ] ) if 2*len(A)==N else hstack( [A, A[list(range(len(A)-1,1,-1))] ] )
#
return ans
'''
Given data set xx yy construct an interpolating polynomial that passes through all points (xx,yy). The output is a function object.
http://stackoverflow.com/questions/14823891/newton-s-interpolating-polynomial-python
'''
def newtonpoly(xx,yy):
import numpy as np
#import matplotlib.pyplot as plt
def coef(x, y):
'''x : array of data points
y : array of f(x) '''
x.astype(float)
y.astype(float)
n = len(x)
a = []
for i in range(n):
a.append(y[i])
for j in range(1, n):
for i in range(n-1, j-1, -1):
a[i] = float(a[i]-a[i-1])/float(x[i]-x[i-j])
return np.array(a) # return an array of coefficients
def Eval(a, x, r):
''' a : array returned by function coef()
x : array of data points
r : the node to interpolate at '''
x.astype(float)
n = len( a ) - 1
temp = a[n]
for i in range( n - 1, -1, -1 ):
temp = temp * ( r - x[i] ) + a[i]
return temp # return the y_value interpolation
#
A = coef(xx,yy)
return lambda r: Eval( A, xx, r )
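# Hypothetical usage sketch for newtonpoly (not from the original source): a
# quadratic is recovered exactly from three points.
def _newtonpoly_demo():
    from numpy import array
    p = newtonpoly(array([0.0, 1.0, 2.0]), array([0.0, 1.0, 4.0]))
    return p(1.5)   # ~2.25, since the interpolant reproduces y = x**2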
#-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-#
# """
# An OrderedSet is a custom MutableSet that remembers its order, so that every
# entry has an index that can be looked up.
#
# Based on a recipe originally posted to ActiveState Recipes by <NAME>,
# and released under the MIT license.
#
# <NAME>'s changes are as follows:
#
# - changed the content from a doubly-linked list to a regular Python list.
# Seriously, who wants O(1) deletes but O(N) lookups by index?
# - add() returns the index of the added item
# - index() just returns the index of an item
# - added a __getstate__ and __setstate__ so it can be pickled
# - added __getitem__
# """
# import collections
#
# SLICE_ALL = slice(None)
# __version__ = '1.3'
#
#
# def is_iterable(obj):
# """
# Are we being asked to look up a list of things, instead of a single thing?
# We check for the `__iter__` attribute so that this can cover types that
# don't have to be known by this module, such as NumPy arrays.
#
# Strings, however, should be considered as atomic values to look up, not
# iterables.
#
# We don't need to check for the Python 2 `unicode` type, because it doesn't
# have an `__iter__` attribute anyway.
# """
# return hasattr(obj, '__iter__') and not isinstance(obj, str)
#
#
# class OrderedSet(collections.MutableSet):
# """
# An OrderedSet is a custom MutableSet that remembers its order, so that
# every entry has an index that can be looked up.
# """
# def __init__(self, iterable=None):
# self.items = []
# self.map = {}
# if iterable is not None:
# self |= iterable
#
# def __len__(self):
# return len(self.items)
#
# def __getitem__(self, index):
# """
# Get the item at a given index.
#
# If `index` is a slice, you will get back that slice of items. If it's
# the slice [:], exactly the same object is returned. (If you want an
# independent copy of an OrderedSet, use `OrderedSet.copy()`.)
#
# If `index` is an iterable, you'll get the OrderedSet of items
# corresponding to those indices. This is similar to NumPy's
# "fancy indexing".
# """
# if index == SLICE_ALL:
# return self
# elif hasattr(index, '__index__') or isinstance(index, slice):
# result = self.items[index]
# if isinstance(result, list):
# return OrderedSet(result)
# else:
# return result
# elif is_iterable(index):
# return OrderedSet([self.items[i] for i in index])
# else:
# raise TypeError("Don't know how to index an OrderedSet by %r" %
# index)
#
# def copy(self):
# return OrderedSet(self)
#
# def __getstate__(self):
# if len(self) == 0:
# # The state can't be an empty list.
# # We need to return a truthy value, or else __setstate__ won't be run.
# #
# # This could have been done more gracefully by always putting the state
# # in a tuple, but this way is backwards- and forwards- compatible with
# # previous versions of OrderedSet.
# return (None,)
# else:
# return list(self)
#
# def __setstate__(self, state):
# if state == (None,):
# self.__init__([])
# else:
# self.__init__(state)
#
# def __contains__(self, key):
# return key in self.map
#
# def add(self, key):
# """
# Add `key` as an item to this OrderedSet, then return its index.
#
# If `key` is already in the OrderedSet, return the index it already
# had.
# """
# if key not in self.map:
# self.map[key] = len(self.items)
# self.items.append(key)
# return self.map[key]
# append = add
#
# def index(self, key):
# """
# Get the index of a given entry, raising an IndexError if it's not
# present.
#
# `key` can be an iterable of entries that is not a string, in which case
# this returns a list of indices.
# """
# if is_iterable(key):
# return [self.index(subkey) for subkey in key]
# return self.map[key]
#
# def discard(self, key):
# raise NotImplementedError(
# "Cannot remove items from an existing OrderedSet"
# )
#
# def __iter__(self):
# return iter(self.items)
#
# def __reversed__(self):
# return reversed(self.items)
#
# def __repr__(self):
# if not self:
# return '%s()' % (self.__class__.__name__,)
# return '%s(%r)' % (self.__class__.__name__, list(self))
#
# def __eq__(self, other):
# if isinstance(other, OrderedSet):
# return len(self) == len(other) and self.items == other.items
# try:
# other_as_set = set(other)
# except TypeError:
# # If `other` can't be converted into a set, it's not equal.
# return False
# else:
# return set(self) == other_as_set
#
#
"""
An OrderedSet is a custom MutableSet that remembers its order, so that every
entry has an index that can be looked up.
Based on a recipe originally posted to ActiveState Recipes by <NAME>,
and released under the MIT license.
<NAME>'s changes are as follows:
- changed the content from a doubly-linked list to a regular Python list.
Seriously, who wants O(1) deletes but O(N) lookups by index?
- add() returns the index of the added item
- index() just returns the index of an item
- added a __getstate__ and __setstate__ so it can be pickled
- added __getitem__
"""
try:
    from collections.abc import MutableSet
except ImportError:  # Python < 3.3
    from collections import MutableSet
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
# Class for ordered sets
class OrderedSet(MutableSet):
    """
    An OrderedSet is a custom MutableSet that remembers its order, so that
    every entry has an index that can be looked up.
    """
    __version__ = '1.3'
def __init__(self, iterable=None):
self.items = []
self.map = {}
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.items)
def __getitem__(self, index):
"""
Get the item at a given index.
If `index` is a slice, you will get back that slice of items. If it's
the slice [:], exactly the same object is returned. (If you want an
independent copy of an OrderedSet, use `OrderedSet.copy()`.)
If `index` is an iterable, you'll get the OrderedSet of items
corresponding to those indices. This is similar to NumPy's
"fancy indexing".
"""
if index == slice(None):
return self
elif hasattr(index, '__index__') or isinstance(index, slice):
result = self.items[index]
if isinstance(result, list):
return OrderedSet(result)
else:
return result
elif is_iterable(index):
return OrderedSet([self.items[i] for i in index])
else:
raise TypeError("Don't know how to index an OrderedSet by %r" %
index)
def copy(self):
return OrderedSet(self)
def __getstate__(self):
if len(self) == 0:
# The state can't be an empty list.
# We need to return a truthy value, or else __setstate__ won't be run.
#
# This could have been done more gracefully by always putting the state
# in a tuple, but this way is backwards- and forwards- compatible with
# previous versions of OrderedSet.
return (None,)
else:
return list(self)
def __setstate__(self, state):
if state == (None,):
self.__init__([])
else:
self.__init__(state)
def __contains__(self, key):
return key in self.map
def add(self, key):
"""
Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had.
"""
if key not in self.map:
self.map[key] = len(self.items)
self.items.append(key)
return self.map[key]
append = add
def index(self, key):
"""
Get the index of a given entry, raising an IndexError if it's not
present.
`key` can be an iterable of entries that is not a string, in which case
this returns a list of indices.
"""
if is_iterable(key):
return [self.index(subkey) for subkey in key]
return self.map[key]
def discard(self, key):
raise NotImplementedError(
"Cannot remove items from an existing OrderedSet"
)
def __iter__(self):
return iter(self.items)
def __reversed__(self):
return reversed(self.items)
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and self.items == other.items
try:
other_as_set = set(other)
except TypeError:
# If `other` can't be converted into a set, it's not equal.
return False
else:
return set(self) == other_as_set
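# Illustrative usage sketch for OrderedSet (not part of the original module):
# indices follow insertion order, duplicates are ignored, and add() returns
# the (possibly pre-existing) index of the key.
#
#   s = OrderedSet(['a', 'b', 'a'])
#   assert list(s) == ['a', 'b']
#   assert s.add('c') == 2
#   assert s.add('a') == 0
#   assert s.index(['b', 'c']) == [1, 2]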
#-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-%%-#
# Return data with common sample rates and lengths
def format_align( domain_A,range_A, # Domain and range of first 1d dataset
domain_B,range_B, # Domain and range of second 1d dataset
center_domains=False, # Toggle for setting domains to 0 at start
verbose=False):
    '''
    Determine the time spacing of each array, and choose the smaller spacing as the common one
    '''
    # Import usefuls
from numpy import array,pad,argmax,mod,arange,angle,exp,roll,std,diff,unwrap,allclose
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Validate domains
if not isunispaced(domain_A):
error('First domain must be unispaced.')
if not isunispaced(domain_B):
error('Second domain must be unispaced.')
# Let the people know
alert('Verbose mode ON.',verbose=verbose)
# Do nothing if the data are already in the same format
if len(domain_A)==len(domain_B):
if allclose(domain_A,domain_B):
alert('Inputs already in the same format. You may wish to apply domain transformations (e.g. time shifts) outside of this function.',verbose=verbose)
return domain_A,range_A,range_B
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
    # Determine boundaries of common domain
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
if center_domains:
# Center domains at start
alert('Setting domains to start at zero.',verbose=verbose)
domain_min = 0
domain_max = max( (domain_A-domain_A[0])[-1], (domain_B-domain_B[0])[-1] )
else:
# Be agnostic about whether shifts in domain may apply
domain_min = min( min(domain_A), min(domain_B) )
domain_max = max( max(domain_A), max(domain_B) )
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Generate a common domain
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
alert('Choosing the smallest domain spacing for calculation of common domain.',verbose=verbose)
d_A = domain_A[1]-domain_A[0]
d_B = domain_B[1]-domain_B[0]
d = min( [d_A,d_B] )
domain = arange( domain_min, domain_max+d, d )
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Interpolate to common domain
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
def __interpolate_domain__(dom,ran):
dom_ = dom - dom[0]
_amp = abs(ran)
_phi = unwrap(angle(ran))
_ran = spline(dom,_amp)(domain) * exp(1j*spline(dom,_phi)(domain))
mask = (domain<min(dom)) | (domain>max(dom))
_ran[mask] = 0
# Return answer
return _ran
#
alert('Interpolating data to common domain.',verbose=verbose)
range_A = __interpolate_domain__(domain_A,range_A)
range_B = __interpolate_domain__(domain_B,range_B)
#
alert('Done.',verbose=verbose)
return domain,range_A,range_B
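# Minimal usage sketch for format_align (illustrative names only; assumes two
# unispaced complex-valued series):
#
#   t_A = arange(0, 10, 0.1);  y_A = exp(2j*t_A)
#   t_B = arange(0, 12, 0.25); y_B = exp(2j*t_B)
#   t, y_A_, y_B_ = format_align(t_A, y_A, t_B, y_B)
#   # t is unispaced with spacing min(0.1, 0.25) and spans both inputs;
#   # y_A_ and y_B_ are zeroed outside their original supports.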
# Given two datasets, use numpy's correlate to align the domains and ranges.
def corr_align( domain_A,range_A,
domain_B,range_B,
plot=False,
center_domains=True,
domain_align=True ):
'''
    Given two datasets, use numpy's correlate to align the domains and ranges.
INPUTS
---
domain_A, Domain values for first dataset
range_A, Range values for first dataset
domain_B, Domain values for second dataset
range_B, Range values for second dataset
plot=False Optional plotting
OUTPUTS
---
domain_A, Aligned Domain values for first dataset
range_A, Aligned Range values for first dataset
domain_B, Aligned Domain values for second dataset
    range_B,            Aligned Range values for second dataset
    foo                 Dictionary containing information about the alignment
'''
    # Import usefuls
from numpy import correlate, allclose
from numpy import array,pad,argmax,mod,arange,angle,exp,roll,std,diff
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Validate domains
if not isunispaced(domain_A):
error('First domain must be unispaced.')
if not isunispaced(domain_B):
error('Second domain must be unispaced.')
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Pad inputs to the same length (again)
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
    domain,range_A,range_B = format_align(domain_A,range_A,domain_B,range_B,center_domains=center_domains,verbose=False)
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Use cross-correlation to determine optimal time and phase shift
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
x = correlate(range_A,range_B,mode='full')
k = argmax( abs(x) )
x0 = x[k]
k0 = mod( k+1, len(domain) ) # NOTE that the +1 here ensures
# k0=dom0=phi0=0 when trying to align data with itself
dom0 = domain[k0]
phi0 = angle(x0)
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Apply the alignment parameters to input B
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
_range_B = range_B * exp( 1j*phi0 )
if domain_align: _range_B = roll( _range_B, k0 )
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
# Plot
# ~-~-~-~-~-~-~-~--~-~-~--~-~-~-~ #
if plot:
#
from matplotlib.pyplot import plot,xlim,figure,figaspect,ylim
ref_d = domain[argmax( abs(range_A) )]
#
fig = figure( figsize=1*figaspect(1.0/7) )
plot( domain, abs(range_A) )
plot( domain, abs(_range_B) )
#
plot( domain, range_A.imag, lw=1, color='r', alpha=0.8 )
plot( domain,_range_B.imag, 'k', alpha=0.9 )
#
dd = 0.25*diff(lim(domain))
xlim(lim(domain))
#
dt = domain[1]-domain[0]
figure(figsize=1*figaspect(1.0/7))
plot( arange(len(x))*dt,abs(x) )
xlim(lim(arange(len(x))*dt))
#
foo = {}
foo['phase_shift'] = phi0
foo['domain_shift'] = dom0
foo['index_shift'] = k0
foo['frmse'] = abs( std( range_A-_range_B )/std(range_A) )
# Return in same order as input with additional info
return domain,range_A,domain,_range_B,foo
# A function that calculates a smoothness measure on the input 1D data.
def smoothness(y,r=20,stepsize=1,domain=None,unsigned=False):
'''
    This function calculates a smoothness measure on the input 1D data.
    The concept is similar to that described here: http://www.goldensoftware.com/variogramTutorial.pdf
    USAGE
    ---
    x = smoothness(y,r=20)
    INPUTS
    ---
    y,              Range of data set
    r=20,           Radius over which to consider variations (derivatives)
    stepsize=1,     The average will be considered every stepsize points. Increasing
                    this from its default of 1 can be useful when processing large
                    data sets; when stepsize is not 1, the result is splined back
                    onto the input domain so the output length matches the input.
    domain=None,    Optional domain points of the data set (defaults to indices)
    unsigned=False, If True, use the magnitude of the windowed slope
    OUTPUTS
    ---
    x,              Smoothness measure -- the data, y, is smooth when |x| is approx. 1
    NOTE that x=-1 is smooth and decreasing while x=1 is smooth and increasing
    '''
# Import usefuls
from numpy import arange,var,std,polyfit,poly1d,mean,diff,zeros_like,array
from scipy.interpolate import InterpolatedUnivariateSpline as spline
#
if domain is None: domain = list(range(0,len(y)))
x,u = [],[]
for k in arange( 0, len(y), stepsize ):
a = max(0,k-r)
b = min(len(y),k+r)-1
D = ( y[b]-y[a] ) / (b-a)
if unsigned: D = abs(D)
d = abs( mean(diff(y[a:b])) )
x.append( ( D / d ) if d!=0 else 0 )
u.append( (domain[a]+domain[b])/2 )
# Preserve length
x = array(x)
if stepsize > 1:
x = spline( u, x, k=1 )(domain)
    # Return the smoothness measure (splined back to the input length when stepsize>1)
return x
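# Sanity-check sketch for smoothness (illustrative): for a perfectly linear
# series the windowed slope D equals the mean local increment d, so the
# measure is approximately 1 everywhere.
#
#   y = 2.0 * arange(200)
#   x = smoothness(y, r=20)
#   # abs(x) ~ 1 at every point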
# Given a 1D vec of values, clump together adjacent identical values
def clump( data ):
'''
Given a 1D vec of values, clump together adjacent identical values.
    INPUTS
---
data, 1D iterable
OUTPUTS
---
    clumps, list of lists; each sublist is of like adjacent values
    maps,   list of index masks corresponding to the clumps (i.e. the sublists mentioned above)
EXAMPLE
---
clump([0,0,0,1,0,0,1,1,1,1,0,0,1,0,1])[0]
... ([[0, 0, 0], [1], [0, 0], [1, 1, 1, 1], [0, 0], [1], [0], [1]],
[[0, 1, 2], [3], [4, 5], [6, 7, 8, 9], [10, 11], [12], [13], [14]])
    --> the largest clump is at indices [6, 7, 8, 9]
spxll ~2018
'''
# Import usefuls
from numpy import array,diff,arange
# Find constant regions and their boundaries
d = array( [0]+list(diff(data)), dtype=bool )
e = find(d)
# For all boundaries
clump = []
for j,k in enumerate(e):
        # The spans between boundaries are to be clumped together
if j==0:
a = 0
else:
a = e[j-1]
b = e[j]
clump.append( data[a:b] )
# Add the trailing clump manually
clump.append(data[e[-1]:])
# Create a pullback map
M = []
k = 0
for c in clump:
M.append( list(arange(len(c))+k) )
k += len(c)
# Return the ans
ans = (clump,M)
return ans
# Given a 1d data vector, determine a mask for the largest smooth region
def smoothest_part( data,
smoothness_radius=100,
smoothness_stepsize=10,
smooth_length=80,
smoothness_tolerance=1,
unsigned=False,
verbose=False ):
'''
Given a 1d data vector, determine a mask for the largest smooth region.
smoothest_part( data, # 1D data of interest -- real
smoothness_radius=100,
smoothness_stepsize=20,
smooth_length=80
smoothness_tolerance=2,
verbose=False
~ spxll 2018
'''
# Import usefuls
from numpy import isreal,argmax
# Validate input(s)
if not isreal(data).all():
warning('Input array not real. The real part will be taken.')
data = data.real
# Calculate the smoothness of the input dataset
x = smooth( smoothness( smooth(data,smooth_length).answer ,r=smoothness_radius,stepsize=smoothness_stepsize,unsigned=unsigned), smooth_length ).answer
# x = smooth( smoothness( data ,r=smoothness_radius,stepsize=smoothness_stepsize), smooth_length ).answer
    # Create a boolean representation of smoothness
k = abs(x-1) < smoothness_tolerance
    # Clump the boolean representation and then determine the largest clump
if k.all():
#
warning('the data appears to be smooth everywhere; please consider using this function\'s optional inputs to set your smoothing criteria')
mask = list(range(len(data)))
elif k.any():
clumps,clump_masks = clump(k)
mask = clump_masks[ argmax( [ len(_) for _ in clump_masks ] ) ]
else:
warning('the data appears to not be smooth anywhere; please consider using this function\'s optional inputs to set your smoothing criteria')
mask = list(range(len(data)))
# Return answer
ans = mask
return ans
# Rotate a 3 vector using Euler angles
def rotate3(vector,alpha,beta,gamma,invert=False):
'''
Rotate a 3 vector using Euler angles under conventions defined at:
https://en.wikipedia.org/wiki/Euler_angles
https://en.wikipedia.org/wiki/Rotation_matrix
Science reference: https://arxiv.org/pdf/1110.2965.pdf (Appendix)
Specifically, the Z1,Y2,Z3 ordering is used: https://wikimedia.org/api/rest_v1/media/math/render/svg/547e522037de6467d948ecf3f7409975fe849d07
    * alpha represents a rotation around the z axis
    * beta represents a rotation around the y' axis
    * gamma represents a rotation around the z'' axis
NOTE that in order to perform the inverse rotation, it is *not* enough to input different rotation angles. One must use the invert=True keyword. This takes the same angle inputs as the forward rotation, but correctly applies the transposed rotation matricies in the reversed order.
spxll'18
'''
# Import usefuls
from numpy import cos,sin,array,dot,ndarray,vstack
    # Handle angles given as arrays
angles_are_arrays = isinstance(alpha,ndarray) and isinstance(beta,ndarray) and isinstance(gamma,ndarray)
    if angles_are_arrays:
        # Check for consistent array shapes
        if not ( alpha.shape == beta.shape == gamma.shape ):
            # Let the people know and halt
            error( 'input angles as arrays must have identical array shapes' )
        # Apply the rotation to each angle triplet and stack the results
        return vstack( [ rotate3(vector,a,b,g,invert=invert) for a,b,g in zip(alpha,beta,gamma) ] )
# Validate input(s)
if isinstance(vector,(list,tuple,ndarray)):
vector = array(vector)
else:
        error('first input must be an iterable 3D vector; please check')
    # Rotation around z (alpha)
Ra = array( [
[cos(alpha),-sin(alpha),0],
[sin(alpha),cos(alpha),0],
[0,0,1]
] )
    # Rotation around y' (beta)
    Rb = array( [
                    [cos(beta),0,sin(beta)],
                    [0,1,0],
                    [-sin(beta),0,cos(beta)]
                ] )
    # Rotation around z'' (gamma)
    Rg = array( [
                    [cos(gamma),-sin(gamma),0],
                    [sin(gamma),cos(gamma),0],
                    [0,0,1]
                ] )
    # Compose the rotations; NOTE the transpose of the composition equals the
    # reversed-order product of transposed matrices, which is the inverse rotation
    R = dot( Ra, dot(Rb,Rg) )
    if invert: R = R.T
    # Apply the rotation to the input vector and return
    ans = dot( R, vector )
    return ans
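# Quick check sketch for rotate3 (illustrative): a rotation by alpha=pi/2
# about z maps x-hat to y-hat (beta=gamma=0), and invert=True undoes a
# forward rotation.
#
#   from numpy import pi, allclose
#   assert allclose( rotate3([1,0,0], pi/2, 0, 0), [0,1,0] )
#   assert allclose( rotate3(rotate3([1,0,0],0.3,0.2,0.1), 0.3,0.2,0.1, invert=True), [1,0,0] )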
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandas as pd
from pandapower.auxiliary import _sum_by_group
from pandapower.pypower.idx_bus import BASE_KV
from pandapower.pypower.idx_gen import GEN_BUS, MBASE
from pandapower.shortcircuit.idx_brch import IKSS_F, IKSS_T, IP_F, IP_T, ITH_F, ITH_T
from pandapower.shortcircuit.idx_bus import C_MIN, C_MAX, KAPPA, R_EQUIV, IKSS1, IP, ITH,\
X_EQUIV, IKSS2, IKCV, M, R_EQUIV_OHM, X_EQUIV_OHM, V_G, K_SG, SKSS
from pandapower.shortcircuit.impedance import _calc_zbus_diag
from pandapower.pypower.pfsoln import pfsoln as pfsoln_pypower
from pandapower.pf.ppci_variables import _get_pf_variables_from_ppci
def _calc_ikss(net, ppci, bus_idx):
fault = net._options["fault"]
case = net._options["case"]
c = ppci["bus"][bus_idx, C_MIN] if case == "min" else ppci["bus"][bus_idx, C_MAX]
ppci["internal"]["baseI"] = ppci["bus"][:, BASE_KV] * np.sqrt(3) / ppci["baseMVA"]
    # Only for testing; should correspond to the PF result
baseZ = ppci["bus"][bus_idx, BASE_KV] ** 2 / ppci["baseMVA"]
ppci["bus"][bus_idx, R_EQUIV_OHM] = baseZ * ppci["bus"][bus_idx, R_EQUIV]
ppci["bus"][bus_idx, X_EQUIV_OHM] = baseZ * ppci["bus"][bus_idx, X_EQUIV]
z_equiv = abs(ppci["bus"][bus_idx, R_EQUIV] + ppci["bus"][bus_idx, X_EQUIV] * 1j)
if fault == "3ph":
ppci["bus"][bus_idx, IKSS1] = c / z_equiv / ppci["bus"][bus_idx, BASE_KV] / np.sqrt(3) * ppci["baseMVA"]
elif fault == "2ph":
ppci["bus"][bus_idx, IKSS1] = c / z_equiv / ppci["bus"][bus_idx, BASE_KV] / 2 * ppci["baseMVA"]
if fault == "3ph":
ppci["bus"][bus_idx, SKSS] = np.sqrt(3) * ppci["bus"][bus_idx, IKSS1] * ppci["bus"][bus_idx, BASE_KV]
elif fault == "2ph":
ppci["bus"][bus_idx, SKSS] = ppci["bus"][bus_idx, IKSS1] * ppci["bus"][bus_idx, BASE_KV] / np.sqrt(3)
# Correct voltage of generator bus inside power station
if np.any(~np.isnan(ppci["bus"][:, K_SG])):
gen_bus_idx = bus_idx[~np.isnan(ppci["bus"][bus_idx, K_SG])]
ppci["bus"][gen_bus_idx, IKSS1] *=\
(ppci["bus"][gen_bus_idx, V_G] / ppci["bus"][gen_bus_idx, BASE_KV])
ppci["bus"][gen_bus_idx, SKSS] *=\
(ppci["bus"][gen_bus_idx, V_G] / ppci["bus"][gen_bus_idx, BASE_KV])
_current_source_current(net, ppci)
def _calc_ikss_1ph(net, ppci, ppci_0, bus_idx):
case = net._options["case"]
c = ppci["bus"][bus_idx, C_MIN] if case == "min" else ppci["bus"][bus_idx, C_MAX]
ppci["internal"]["baseI"] = ppci["bus"][:, BASE_KV] * np.sqrt(3) / ppci["baseMVA"]
ppci_0["internal"]["baseI"] = ppci_0["bus"][:, BASE_KV] * np.sqrt(3) / ppci_0["baseMVA"]
z_equiv = abs((ppci["bus"][bus_idx, R_EQUIV] + ppci["bus"][bus_idx, X_EQUIV] * 1j) * 2 +
(ppci_0["bus"][bus_idx, R_EQUIV] + ppci_0["bus"][bus_idx, X_EQUIV] * 1j))
    # Only for testing; should correspond to the PF result
baseZ = ppci["bus"][bus_idx, BASE_KV] ** 2 / ppci["baseMVA"]
ppci["bus"][bus_idx, R_EQUIV_OHM] = baseZ * ppci['bus'][bus_idx, R_EQUIV]
ppci["bus"][bus_idx, X_EQUIV_OHM] = baseZ * ppci['bus'][bus_idx, X_EQUIV]
ppci_0["bus"][bus_idx, R_EQUIV_OHM] = baseZ * ppci_0['bus'][bus_idx, R_EQUIV]
ppci_0["bus"][bus_idx, X_EQUIV_OHM] = baseZ * ppci_0['bus'][bus_idx, X_EQUIV]
ppci_0["bus"][bus_idx, IKSS1] = c / z_equiv / ppci_0["bus"][bus_idx, BASE_KV] * np.sqrt(3) * ppci_0["baseMVA"]
ppci["bus"][bus_idx, IKSS1] = c / z_equiv / ppci["bus"][bus_idx, BASE_KV] * np.sqrt(3) * ppci["baseMVA"]
_current_source_current(net, ppci)
def _current_source_current(net, ppci):
ppci["bus"][:, IKCV] = 0
ppci["bus"][:, IKSS2] = 0
bus_lookup = net["_pd2ppc_lookups"]["bus"]
    if False not in net.sgen.current_source.values:
sgen = net.sgen[net._is_elements["sgen"]]
else:
sgen = net.sgen[net._is_elements["sgen"] & net.sgen.current_source]
if len(sgen) == 0:
return
if any(pd.isnull(sgen.sn_mva)):
raise ValueError("sn_mva needs to be specified for all sgens in net.sgen.sn_mva")
baseI = ppci["internal"]["baseI"]
sgen_buses = sgen.bus.values
sgen_buses_ppc = bus_lookup[sgen_buses]
if not "k" in sgen:
raise ValueError("Nominal to short-circuit current has to specified in net.sgen.k")
i_sgen_pu = sgen.sn_mva.values / net.sn_mva * sgen.k.values
buses, ikcv_pu, _ = _sum_by_group(sgen_buses_ppc, i_sgen_pu, i_sgen_pu)
ppci["bus"][buses, IKCV] = ikcv_pu
if net["_options"]["inverse_y"]:
Zbus = ppci["internal"]["Zbus"]
ppci["bus"][:, IKSS2] = abs(1 / np.diag(Zbus) * np.dot(Zbus, ppci["bus"][:, IKCV] * -1j) / baseI)
else:
ybus_fact = ppci["internal"]["ybus_fact"]
diagZ = _calc_zbus_diag(net, ppci)
ppci["bus"][:, IKSS2] = abs(ybus_fact(ppci["bus"][:, IKCV] * -1j) / diagZ / baseI)
ppci["bus"][buses, IKCV] /= baseI[buses]
def _calc_ip(net, ppci):
ip = np.sqrt(2) * (ppci["bus"][:, KAPPA] * ppci["bus"][:, IKSS1] + ppci["bus"][:, IKSS2])
ppci["bus"][:, IP] = ip
def _calc_ith(net, ppci):
tk_s = net["_options"]["tk_s"]
kappa = ppci["bus"][:, KAPPA]
f = 50
n = 1
m = (np.exp(4 * f * tk_s * np.log(kappa - 1)) - 1) / (2 * f * tk_s * np.log(kappa - 1))
m[np.where(kappa > 1.99)] = 0
ppci["bus"][:, M] = m
ith = (ppci["bus"][:, IKSS1] + ppci["bus"][:, IKSS2]) * np.sqrt(m + n)
ppci["bus"][:, ITH] = ith
# TODO: Ib for generation close bus
# def _calc_ib_generator(net, ppci):
# # Zbus = ppci["internal"]["Zbus"]
# # baseI = ppci["internal"]["baseI"]
# tk_s = net._options['tk_s']
# c = 1.1
# z_equiv = ppci["bus"][:, R_EQUIV] + ppci["bus"][:, X_EQUIV] * 1j
# I_ikss = c / z_equiv / ppci["bus"][:, BASE_KV] / np.sqrt(3) * ppci["baseMVA"]
# # calculate voltage source branch current
# # I_ikss = ppci["bus"][:, IKSS1]
# # V_ikss = (I_ikss * baseI) * Zbus
# gen = net["gen"][net._is_elements["gen"]]
# gen_vn_kv = gen.vn_kv.values
# # Check difference ext_grid and gen
# gen_buses = ppci['gen'][:, GEN_BUS].astype(np.int64)
# gen_mbase = ppci['gen'][:, MBASE]
# gen_i_rg = gen_mbase / (np.sqrt(3) * gen_vn_kv)
# gen_buses_ppc, gen_sn_mva, I_rG = _sum_by_group(gen_buses, gen_mbase, gen_i_rg)
# # shunt admittance of generator buses and generator short circuit current
# # YS = ppci["bus"][gen_buses_ppc, GS] + ppci["bus"][gen_buses_ppc, BS] * 1j
# # I_kG = V_ikss.T[:, gen_buses_ppc] * YS / baseI[gen_buses_ppc]
# xdss_pu = gen.xdss_pu.values
# rdss_pu = gen.rdss_pu.values
# cosphi = gen.cos_phi.values
# X_dsss = xdss_pu * np.square(gen_vn_kv) / gen_mbase
# R_dsss = rdss_pu * np.square(gen_vn_kv) / gen_mbase
# K_G = ppci['bus'][gen_buses, BASE_KV] / gen_vn_kv * c / (1 + xdss_pu * np.sin(np.arccos(cosphi)))
# Z_G = (R_dsss + 1j * X_dsss)
# I_kG = c * ppci['bus'][gen_buses, BASE_KV] / np.sqrt(3) / (Z_G * K_G) * ppci["baseMVA"]
# dV_G = 1j * X_dsss * K_G * I_kG
# V_Is = c * ppci['bus'][gen_buses, BASE_KV] / np.sqrt(3)
# # I_kG_contribution = I_kG.sum(axis=1)
# # ratio_SG_ikss = I_kG_contribution / I_ikss
# # close_to_SG = ratio_SG_ikss > 5e-2
# close_to_SG = I_kG / I_rG > 2
# if tk_s == 2e-2:
# mu = 0.84 + 0.26 * np.exp(-0.26 * abs(I_kG) / I_rG)
# elif tk_s == 5e-2:
# mu = 0.71 + 0.51 * np.exp(-0.3 * abs(I_kG) / I_rG)
# elif tk_s == 10e-2:
# mu = 0.62 + 0.72 * np.exp(-0.32 * abs(I_kG) / I_rG)
# elif tk_s >= 25e-2:
# mu = 0.56 + 0.94 * np.exp(-0.38 * abs(I_kG) / I_rG)
# else:
# raise UserWarning('not implemented for other tk_s than 20ms, 50ms, 100ms and >=250ms')
# mu = np.clip(mu, 0, 1)
# I_ikss_G = abs(I_ikss - np.sum((1 - mu) * I_kG, axis=1))
# # I_ikss_G = I_ikss - np.sum(abs(V_ikss.T[:, gen_buses_ppc]) * (1-mu) * I_kG, axis=1)
# I_ikss_G = abs(I_ikss - np.sum(dV_G / V_Is * (1 - mu) * I_kG, axis=1))
# return I_ikss_G
def _calc_branch_currents(net, ppci, bus_idx):
n_sc_bus = np.shape(bus_idx)[0]
case = net._options["case"]
minmax = np.nanmin if case == "min" else np.nanmax
Yf = ppci["internal"]["Yf"]
Yt = ppci["internal"]["Yt"]
baseI = ppci["internal"]["baseI"]
n_bus = ppci["bus"].shape[0]
fb = np.real(ppci["branch"][:, 0]).astype(int)
tb = np.real(ppci["branch"][:, 1]).astype(int)
# calculate voltage source branch current
if net["_options"]["inverse_y"]:
Zbus = ppci["internal"]["Zbus"]
V_ikss = (ppci["bus"][:, IKSS1] * baseI) * Zbus
V_ikss = V_ikss[:, bus_idx]
else:
ybus_fact = ppci["internal"]["ybus_fact"]
        V_ikss = np.zeros((n_bus, n_sc_bus), dtype=complex)
for ix, b in enumerate(bus_idx):
            ikss = np.zeros(n_bus, dtype=complex)
ikss[b] = ppci["bus"][b, IKSS1] * baseI[b]
V_ikss[:, ix] = ybus_fact(ikss)
ikss1_all_f = np.conj(Yf.dot(V_ikss))
ikss1_all_t = np.conj(Yt.dot(V_ikss))
ikss1_all_f[abs(ikss1_all_f) < 1e-10] = 0.
ikss1_all_t[abs(ikss1_all_t) < 1e-10] = 0.
# add current source branch current if there is one
    current_sources = np.any(ppci["bus"][:, IKCV] > 0)
if current_sources:
current = np.tile(-ppci["bus"][:, IKCV], (n_sc_bus, 1))
for ix, b in enumerate(bus_idx):
current[ix, b] += ppci["bus"][b, IKSS2]
# calculate voltage source branch current
if net["_options"]["inverse_y"]:
Zbus = ppci["internal"]["Zbus"]
V = np.dot((current * baseI), Zbus).T
else:
ybus_fact = ppci["internal"]["ybus_fact"]
            V = np.zeros((n_bus, n_sc_bus), dtype=complex)
for ix, b in enumerate(bus_idx):
V[:, ix] = ybus_fact(current[ix, :] * baseI[b])
fb = np.real(ppci["branch"][:, 0]).astype(int)
tb = np.real(ppci["branch"][:, 1]).astype(int)
ikss2_all_f = np.conj(Yf.dot(V))
ikss2_all_t = np.conj(Yt.dot(V))
ikss_all_f = abs(ikss1_all_f + ikss2_all_f)
ikss_all_t = abs(ikss1_all_t + ikss2_all_t)
else:
ikss_all_f = abs(ikss1_all_f)
ikss_all_t = abs(ikss1_all_t)
if net._options["return_all_currents"]:
ppci["internal"]["branch_ikss_f"] = ikss_all_f / baseI[fb, None]
ppci["internal"]["branch_ikss_t"] = ikss_all_t / baseI[tb, None]
else:
ikss_all_f[abs(ikss_all_f) < 1e-10] = np.nan
ikss_all_t[abs(ikss_all_t) < 1e-10] = np.nan
ppci["branch"][:, IKSS_F] = np.nan_to_num(minmax(ikss_all_f, axis=1) / baseI[fb])
ppci["branch"][:, IKSS_T] = np.nan_to_num(minmax(ikss_all_t, axis=1) / baseI[tb])
if net._options["ip"]:
kappa = ppci["bus"][:, KAPPA]
if current_sources:
            ip_all_f = np.sqrt(2) * (ikss1_all_f * kappa[bus_idx] + ikss2_all_f)
from functools import partial
import numpy as np
import pytest
from guacamol.score_modifier import LinearModifier, SquaredModifier, AbsoluteScoreModifier, GaussianModifier, \
MinGaussianModifier, MaxGaussianModifier, ThresholdedLinearModifier, ClippedScoreModifier, \
SmoothClippedScoreModifier, ChainedModifier
scalar_value = 8.343
value_array = np.array([[-3.3, 0, 5.5],
[0.011, 2.0, -33]])
def test_linear_function_default():
f = LinearModifier()
assert f(scalar_value) == scalar_value
assert np.array_equal(f(value_array), value_array)
def test_linear_function_with_slope():
slope = 3.3
f = LinearModifier(slope=slope)
assert f(scalar_value) == slope * scalar_value
assert np.array_equal(f(value_array), slope * value_array)
def test_squared_function():
target_value = 5.555
coefficient = 0.123
f = SquaredModifier(target_value=target_value, coefficient=coefficient)
expected_scalar = 1.0 - coefficient * (target_value - scalar_value) ** 2
expected_array = 1.0 - coefficient * np.square(target_value - value_array)
assert f(scalar_value) == expected_scalar
assert np.array_equal(f(value_array), expected_array)
def test_absolute_function():
target_value = 5.555
f = AbsoluteScoreModifier(target_value=target_value)
expected_scalar = 1.0 - abs(target_value - scalar_value)
expected_array = 1.0 - np.abs(target_value - value_array)
assert f(scalar_value) == expected_scalar
assert np.array_equal(f(value_array), expected_array)
def gaussian(x, mu, sig):
    return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
import unittest
import numpy as np
from nptest import nptest
class LargeArrayTests(unittest.TestCase):
def test_largearray_matmul_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat)
print(z)
def test_largearray_matmul_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.matmul(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_1(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_add_INT64_2(self):
width = 1024
height = 1024
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.add(x_range.reshape(width,1), y_range.reshape(1, height))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_1(self):
width = 2048
height = 2048
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(width,1), y_range.reshape(1, height))
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
def test_largearray_multiply_INT64_2(self):
width = 4096
height = 4096
x_range = np.arange(0,width, 1, dtype = np.int64)
y_range = np.arange(0,height*2, 2, dtype = np.int64)
x_mat = np.multiply(x_range.reshape(1, width), y_range.reshape(height, 1))
x_mat = np.expand_dims(x_mat, 0)
z = np.sum(x_mat, axis=0)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=1)
z1 = np.sum(z)
print(z1)
z = np.sum(x_mat, axis=2)
z1 = np.sum(z)
print(z1)
def test_largearray_copy_int64_1(self):
length = 268435435 # (Int32.MaxValue) / sizeof(double) - 20;
x = np.arange(0, length, 1, dtype = np.int64);
z = np.sum(x);
print(z)
y = x.copy()
z = np.sum(y)
print(z)
def test_largearray_copy_int64_2(self):
length = 268435434 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
z = np.sum(x, axis=0);
z = np.sum(z)
print(z)
y = x.copy()
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_meshgrid_int64_2(self):
length = 100 * 100
x = np.arange(0,length, 1, dtype = np.int64)
x1, x2 = np.meshgrid(x,x)
print(x1.shape)
print(x2.shape)
z = np.sum(x1)
print(z)
z = np.sum(x2)
print(z)
def test_largearray_checkerboard_1(self):
x = np.zeros((2048,2048),dtype=int)
x[1::2,::2] = 1
x[::2,1::2] = 1
print(np.sum(x))
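        # The two strided assignments above set every other element per row
        # with alternating offsets, i.e. a checkerboard; exactly half of the
        # 2048*2048 entries are 1, so the printed sum is 2097152.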
def test_largearray_byteswap_int64_2(self):
length = 1024 * 1024* 32 # (Int32.MaxValue) / sizeof(double) - 21;
x = np.arange(0, length, 1, dtype = np.int64).reshape(2,-1);
y = x.byteswap();
z = np.sum(y, axis=0);
z = np.sum(z)
print(z)
z = np.sum(y, axis=1)
z = np.sum(z)
print(z)
def test_largearray_unique_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
matrix = matrix[1:40:2, 1:-2:1]
uvalues, indexes, inverse, counts = np.unique(matrix, return_counts = True, return_index=True, return_inverse=True);
print(np.sum(uvalues))
print(np.sum(indexes))
print(np.sum(inverse))
print(np.sum(counts))
def test_largearray_where_INT32(self):
matrix = np.arange(16000000, dtype=np.int32).reshape((40, -1));
print(np.sum(matrix))
indices = np.where(matrix % 2 == 0);
m1 = matrix[indices]
print(np.sum(m1))
def test_largearray_insert_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.insert(matrix, 0, [999,100,101])
print(np.sum(m1))
def test_largearray_append_INT64(self):
matrix = np.arange(16000000, dtype=np.int64).reshape((40, -1));
print(np.sum(matrix))
m1 = np.append(matrix, [999,100,101])
print(np.sum(m1))
def test_largearray_concatenate_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.arange(1, 16000001, dtype=np.int64).reshape((40, -1));
c = np.concatenate((a, b), axis=0)
print(np.sum(c))
#d = np.concatenate((a.T, b), axis=1)
#print(np.sum(d))
e = np.concatenate((a, b), axis=None)
print(np.sum(e))
def test_largearray_min_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amin(a)
print(np.sum(b))
b = np.amin(a, axis=0)
print(np.sum(b))
b = np.amin(a, axis=1)
print(np.sum(b))
def test_largearray_max_INT64(self):
a = np.arange(16000000, dtype=np.int64).reshape((40, -1));
b = np.amax(a)
        print(np.sum(b))
# -*- coding: utf-8 -*-
""" Generates the outputs of an arbitrary CNN layer. """
__author__ = "<NAME>, ISELAB, CVC-UAB"
__email__ = "<EMAIL>"
import argparse
# CMD Options
parser = argparse.ArgumentParser(description="""Generates the outputs of an arbitrary CNN layer
accepts either a LMDB dataset or a listfile of images.""")
parser.add_argument('model', type=str, help='The model deploy file.')
parser.add_argument('weights', type=str, help='The model weights file.')
parser.add_argument('layer', type=str, nargs='+', help='The target layer(s).')
parser.add_argument('--output', type=str, help='The output file.', default='output.h5')
parser.add_argument('--flist', nargs=2, type=str, help='The base folder and the file list of the images.', default=None)
parser.add_argument('--label_names', nargs='+', type=str, default=['labels'], help='specific label names, accepts more than one label')
parser.add_argument('--dataset', type=str, help='The lmdb dataset.', default=None)
parser.add_argument('--mean_tensor', type=float, nargs=3, default=None, help='Pixel mean (3 bgr values)')
parser.add_argument('--mean_file', type=str, default=None, help='Per-pixel mean in bgr')
parser.add_argument('--raw_scale', type=float, default=None, help='Scale value before mean subtraction.')
parser.add_argument('--input_scale', type=float, default=None, help='Scale value after mean subtraction.')
parser.add_argument('--swap', action='store_true', help='BGR -> RGB. Make sure lmdb raw images are bgr')
parser.add_argument('--cpuonly', action='store_true', help='CPU-Only flag.')
parser.add_argument('--standarize', action='store_true', help="whether to standarize the outputs")
parser.add_argument('--standarize_with', type=str, default='', help='get mean and std from another .h5 (recommended for validation)')
parser.add_argument('--verbose', action='store_true', help='show image paths while being processed')
parser.add_argument('--batch_size', '-b', type=int, default=1, help="batch size")
parser.add_argument('--make_deploy', '-d', nargs=3, default=[], type=int, help="given the input size (c h w), it converts trainval into deploy")
parser.add_argument('--flatten', action="store_true", help="whether to save flattened features")
parser.add_argument('--center_crop', type=int, default=None, help="Center crop size")
parser.add_argument('--test', action="store_true")
args = parser.parse_args()
#TODO
if args.standarize and len(args.label_names) > 1:
raise NotImplementedError("This code does not support yet standarizing multiple labels")
# Move the rest of imports to avoid conflicts with argparse
import sys
import os
from loader import caffe
from PIL import Image
import h5py
import numpy as np
if len(args.make_deploy) > 0:
from make_deploy import make_deploy
model_path = os.path.join(os.path.dirname(args.model), "deploy.prototxt")
with open(model_path, 'w') as deploy:
deploy.write(make_deploy(args.model, args.make_deploy, args.layer[-1]))
args.model = model_path
# CPU ONLY
if not args.cpuonly:
caffe.set_mode_gpu()
# Read Deploy + Weights file
net = caffe.Net(args.model, args.weights,
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# images are assumed to be in format hxwxc
transformer.set_transpose('data', (2,0,1))
# get mean
if args.mean_tensor is not None:
    transformer.set_mean('data', np.array(args.mean_tensor))
import json
import os
from typing import List
from numpy.random import randint
import numpy as np
import argparse
from enum import Enum
from backend import ScheduleNode, Schedule
from backend import TablaTemplate
from backend import OP_SELECT_WIDTH, OP_WIDTH, MEM_INTERFACE_WIDTH, BUS_WIDTH
from backend import PE
class Lane(object):
"""A Lane is a memory interface component that connects a set of PEs together. Once data is read through AXI, it is
fed to Lanes before being written to its corresponding PEs.
TODO Make this inherit from Component class.
"""
def __init__(self, laneid: int, peids: List[int]):
"""
Parameters
----------
laneid : int
Unique ID assigned to this Lane.
peids : List[int]
IDs of PEs attached to this Lane.
"""
self.laneid = laneid
self.peids = peids
def get_relpeid(self, peid: int) -> int:
"""Given a PE ID, returns the relative offset in this Lane.
"""
if peid in self.peids:
return self.peids.index(peid)
else:
raise Exception("PE (ID: {:d}) does not exist in this lane!".format(peid))
def __str__(self) -> str:
return f'Lane {self.laneid}: PE IDs: {self.peids}'
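# Example (illustrative, assuming the default 16-lane / 4-PEs-per-lane layout
# produced by LaneGenerator.init_lanes below):
#
#   lane = Lane(2, [2, 18, 34, 50])
#   assert lane.get_relpeid(34) == 2   # PE 34 is the third PE on lane 2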
class LaneGenerator(object):
"""A class to manage Lanes. Given the total number of lanes and PEs per Lane, Generates the Lanes accordingly.
"""
def __init__(self, architecture: TablaTemplate, nlanes: int = 16, pes_per_lane: int = 4):
"""
Parameters
----------
nlanes : int
Number of Lanes to be generated.
pes_per_lane : int
Number of PEs attached to each Lane.
"""
self.architecture = architecture
self.nlanes = nlanes
self.pes_per_lane = pes_per_lane
def init_lanes(self):
lanes = []
for base_peid in range(self.nlanes):
lanes.append(Lane(base_peid, [base_peid + self.nlanes * i for i in range(self.pes_per_lane)]))
return lanes
def get_lanes_by_shift_amount(self, batch: List):
"""Given a batch, figure out shift amounts for each Lane, and group these by shift amount.
"""
lanes_by_shift = {}
for curr_lane, data in enumerate(batch):
if data is not None:
component_map = self.architecture.component_map
dest_pe = component_map[data.src_component]
dest_pe_id = dest_pe.category_id
# print(f"Lane: {curr_lane}, Data: {data._edge_name}, Namespace: {data.namespace_name}, PE: {dest_pe_id}")
# print(data._edge_name, dest_pe_id, data.path)
# for comp in data.path:
# if component_map[comp].component_type == "pe":
# print("PE ID: ", component_map[comp].category_id)
dest_lane_id = self.get_dest_laneid(dest_pe_id)
shift_amount = self.get_shift_amount(curr_lane, dest_lane_id)
if shift_amount in lanes_by_shift:
lanes_by_shift[shift_amount].append((dest_lane_id, dest_pe_id))
else:
lanes_by_shift[shift_amount] = [(dest_lane_id, dest_pe_id)]
# print("pos: {:d}, dest_lane_id: {:d}, shift to left: {:d}".format(curr_lane, dest_lane_id, shift_amount))
return lanes_by_shift
def get_shift_amount(self, curr_lane_id: int, dest_lane_id: int) -> int:
"""Given a current Lane position and destination Lane, calculate the shift amount (left shift only).
Parameters
----------
curr_lane_id : int
Current Lane position.
dest_lane_id : int
Destination Lane position.
Returns
-------
Shift amount
"""
if curr_lane_id >= dest_lane_id:
shift_amount = curr_lane_id - dest_lane_id
else:
shift_amount = self.nlanes - (dest_lane_id - curr_lane_id)
return shift_amount
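    # Worked example: with nlanes=16, data in lane 3 destined for lane 7
    # wraps left by 16 - (7 - 3) = 12 slots, while data in lane 7 destined
    # for lane 3 shifts left by 7 - 3 = 4.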
def get_dest_laneid(self, pe_id):
return pe_id % self.nlanes
def get_lane(self, lanes, lane_id):
return lanes[lane_id]
class AXI(object):
"""AXI Master.
"""
def __init__(self, id, axi_size: int = 64, axi_read_cycle: int = 4):
self.id = id
# these two variables determine how many read instructions are required
self.axi_size = axi_size # number of data elements read by each AXI
self.axi_read_cycle = axi_read_cycle # number of data elements read in one cycle
self.lanes = []
self.data = [] # all data
self.data_by_cycle = [] # all data grouped by cycle (4 per cycle)
def set_lanes(self, lanes):
self.lanes = lanes
def __str__(self):
lanes = ''
for lane in self.lanes:
lanes += str(lane) + '\n'
return f'AXI {self.id}:\n{lanes}'
class AXIController(object):
"""Reads data from DDR through AXI.
"""
def __init__(self, axi_list, architecture):
self.axi_list = axi_list
# TODO The following two lines are too ad-hoc.
self.axi_size = self.axi_list[0].axi_size
self.axi_read_cycle = self.axi_list[0].axi_read_cycle
self.architecture = architecture
@property
def max_cycle(self):
cycle = 0
for axi in self.axi_list:
if len(axi.data_by_cycle) > cycle:
cycle = len(axi.data_by_cycle)
return cycle
def assign_axi(self, data: List[ScheduleNode]):
"""Assigns each data element to corresponding AXI master.
TODO This is buggy if data size is greater than `self.axi_size * len(self.axi_list)`.
"""
axis = len(data) // self.axi_size
r = len(data) % self.axi_size
for i in range(axis):
self.axi_list[i % 4].data.extend(data[i * self.axi_size: i * self.axi_size + self.axi_size])
if r > 0:
if axis == 0:
self.axi_list[0].data.extend(data[:])
else:
i += 1
self.axi_list[i % 4].data.extend(data[i * self.axi_size:])
def assign_weights_to_pe(self, weight_nodes):
for weight_node in weight_nodes:
# Find destination PE
component_map = self.architecture.component_map
dest_pe = component_map[weight_node.src_component]
dest_pe_id = dest_pe.category_id
# Put the node in the PE
dest_pe.weight_nodes.append(weight_node)
def print_axi_contents(self):
for axi in self.axi_list:
print(f'AXI {axi.id}:')
for lane in axi.lanes:
print(f'Lane {lane.laneid}:')
for pe_id in lane.peids:
print(f'PE ID {pe_id}: ', end='')
pe = self.architecture.cat_component_map['pe'][pe_id]
for data in pe.weight_nodes:
print(f'{data._edge_name}', end=', ')
print()
print()
print()
def gen_matrix_for_axi(self, axi_id):
axi = self.axi_list[axi_id]
lane_data = []
for lane in axi.lanes:
weight_data = []
for pe_id in lane.peids:
pe = self.architecture.cat_component_map['pe'][pe_id]
values = [node.value for node in pe.weight_nodes]
weight_data.extend(values)
lane_data.append(weight_data)
return lane_data
def find_max_number_of_weights(self, lanes):
max_num = -1
for lane in lanes:
num_weights = len(lane)
if num_weights > max_num:
max_num = num_weights
return max_num
def put_placeholder(self, weight_matrix, pe_index, lane_index, num_placeholder):
"""This is only used for weights."""
values = weight_matrix[pe_index, lane_index]
concatenated = np.append(values, np.zeros((num_placeholder,), dtype=int))
weight_matrix[pe_index, lane_index] = concatenated
def divide_axi_data_by_cycle(self):
        """Groups AXI data by cycle. Every AXI can read 4 data elements at a time."""
for axi in self.axi_list:
cycls = len(axi.data) // self.axi_read_cycle
r = len(axi.data) % self.axi_read_cycle
for i in range(cycls):
axi.data_by_cycle.append(axi.data[i * self.axi_read_cycle: i * self.axi_read_cycle + self.axi_read_cycle])
if r > 0:
if cycls == 0:
axi.data_by_cycle.append(axi.data[:])
else:
i += 1
axi.data_by_cycle.append(axi.data[i * self.axi_read_cycle:])
def get_axi_data_for_cycle(self, cycle: int):
"""Reads all data from every AXI master in the given cycle.
"""
batch = []
for axi in self.axi_list:
if cycle >= len(axi.data_by_cycle):
continue
else:
batch.extend(axi.data_by_cycle[cycle])
return batch
def get_axi_head_data(self):
"""Gets head data from each axi"""
batch = []
for axi in self.axi_list:
head_data = axi.data_by_cycle.pop(0)
batch.extend(head_data)
return batch
def peek_axi_head_data(self):
batch = []
for axi in self.axi_list:
if len(axi.data_by_cycle) == 0:
batch.extend([None, None, None, None])
else:
head_data = axi.data_by_cycle[0]
batch.extend(head_data)
return batch
def write_axi_data(self, axi_dir):
for axi in self.axi_list:
print(f'AXI {axi.id}:')
filepath = os.path.join(axi_dir, f"axi_{axi.id}.txt")
with open(filepath, 'w') as f:
for item in axi.data:
f.write(f'{item.value}\n')
print(f'{item._edge_name}', end=', ')
print()
print()
def write_weights_from_axi(self, data, filename):
"""Write data from each AXI to file."""
with open(filename, 'w') as f:
            for values in np.transpose(data):
                # Assumed layout: one output row per weight index, one column per lane
                f.write(' '.join(str(v) for v in values) + '\n')
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleIsochroneApprox
#
# Calculate actions-angle coordinates for any potential by using
# an isochrone potential as an approximate potential and using
# a Fox & Binney (2013?) + torus machinery-like algorithm
# (angle-fit) (Bovy 2014)
#
# methods:
# __call__: returns (jr,lz,jz)
# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)
# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)
#
###############################################################################
import math
import warnings
import numpy as nu
import numpy.linalg as linalg
from scipy import optimize
from galpy.potential import dvcircdR, vcirc, _isNonAxi
from galpy.potential.Potential import flatten as flatten_potential
from .actionAngleIsochrone import actionAngleIsochrone
from .actionAngle import actionAngle
from galpy.potential import IsochronePotential, MWPotential
from galpy.util import bovy_plot, galpyWarning
from galpy.util.bovy_conversion import physical_conversion, \
potential_physical_input, time_in_Gyr
_TWOPI= 2.*nu.pi
_ANGLETOL= 0.02 #tolerance for deciding whether full angle range is covered
_APY_LOADED= True
try:
from astropy import units
except ImportError:
_APY_LOADED= False
class actionAngleIsochroneApprox(actionAngle):
"""Action-angle formalism using an isochrone potential as an approximate potential and using a Fox & Binney (2014?) like algorithm to calculate the actions using orbit integrations and a torus-machinery-like angle-fit to get the angles and frequencies (Bovy 2014)"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngleIsochroneApprox object
INPUT:
Either:
b= scale parameter of the isochrone parameter (can be Quantity)
ip= instance of a IsochronePotential
aAI= instance of an actionAngleIsochrone
pot= potential to calculate action-angle variables for
tintJ= (default: 100) time to integrate orbits for to estimate actions (can be Quantity)
ntintJ= (default: 10000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
dt= (None) orbit.integrate dt keyword (for fixed stepsize integration)
maxn= (default: 3) Default value for all methods when using a grid in vec(n) up to this n (zero-based)
ro= distance from vantage point to GC (kpc; can be Quantity)
vo= circular velocity at ro (km/s; can be Quantity)
OUTPUT:
instance
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
actionAngle.__init__(self,
ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
if not 'pot' in kwargs: #pragma: no cover
raise IOError("Must specify pot= for actionAngleIsochroneApprox")
self._pot= flatten_potential(kwargs['pot'])
if self._pot == MWPotential:
warnings.warn("Use of MWPotential as a Milky-Way-like potential is deprecated; galpy.potential.MWPotential2014, a potential fit to a large variety of dynamical constraints (see Bovy 2015), is the preferred Milky-Way-like potential in galpy",
galpyWarning)
if not 'b' in kwargs and not 'ip' in kwargs \
and not 'aAI' in kwargs: #pragma: no cover
raise IOError("Must specify b=, ip=, or aAI= for actionAngleIsochroneApprox")
if 'aAI' in kwargs:
if not isinstance(kwargs['aAI'],actionAngleIsochrone): #pragma: no cover
raise IOError("'Provided aAI= does not appear to be an instance of an actionAngleIsochrone")
self._aAI= kwargs['aAI']
elif 'ip' in kwargs:
ip= kwargs['ip']
if not isinstance(ip,IsochronePotential): #pragma: no cover
raise IOError("'Provided ip= does not appear to be an instance of an IsochronePotential")
self._aAI= actionAngleIsochrone(ip=ip)
else:
if _APY_LOADED and isinstance(kwargs['b'],units.Quantity):
b= kwargs['b'].to(units.kpc).value/self._ro
else:
b= kwargs['b']
self._aAI= actionAngleIsochrone(ip=IsochronePotential(b=b,
normalize=1.))
self._tintJ= kwargs.get('tintJ',100.)
if _APY_LOADED and isinstance(self._tintJ,units.Quantity):
self._tintJ= self._tintJ.to(units.Gyr).value\
/time_in_Gyr(self._vo,self._ro)
self._ntintJ= kwargs.get('ntintJ',10000)
self._integrate_dt= kwargs.get('dt',None)
self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)
self._integrate_method= kwargs.get('integrate_method','dopr54_c')
self._maxn= kwargs.get('maxn',3)
self._c= False
ext_loaded= False
if ext_loaded and (('c' in kwargs and kwargs['c'])
or not 'c' in kwargs): #pragma: no cover
self._c= True
else:
self._c= False
# Check the units
self._check_consistent_units()
return None
def _evaluate(self,*args,**kwargs):
"""
NAME:
__call__ (_evaluate)
PURPOSE:
evaluate the actions (jr,lz,jz)
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
cumul= if True, return the cumulative average actions (to look
at convergence)
OUTPUT:
(jr,lz,jz)
HISTORY:
2013-09-10 - Written - Bovy (IAS)
"""
R,vR,vT,z,vz,phi= self._parse_args(False,False,*args)
if self._c: #pragma: no cover
pass
else:
#Use self._aAI to calculate the actions and angles in the isochrone potential
acfs= self._aAI._actionsFreqsAngles(R.flatten(),
vR.flatten(),
vT.flatten(),
z.flatten(),
vz.flatten(),
phi.flatten())
jrI= nu.reshape(acfs[0],R.shape)[:,:-1]
jzI= nu.reshape(acfs[2],R.shape)[:,:-1]
anglerI= nu.reshape(acfs[6],R.shape)
            anglezI= nu.reshape(acfs[8],R.shape)
# -*- coding: utf-8 -*-
'''
Tests for NDCube
'''
from collections import OrderedDict
import datetime
import pytest
import numpy as np
import astropy.units as u
from ndcube import NDCube, NDCubeOrdered
from ndcube.utils.wcs import WCS, _wcs_slicer
from ndcube.tests import helpers
from ndcube.ndcube_sequence import NDCubeSequence
# sample data for tests
# TODO: use a fixture reading from a test file. file TBD.
ht = {'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 0, 'CRVAL3': 0, 'NAXIS3': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 0,
'NAXIS2': 3,
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 4}
wt = WCS(header=ht, naxis=3)
data = np.array([[[1, 2, 3, 4], [2, 4, 5, 3], [0, -1, 2, 3]],
[[2, 4, 5, 1], [10, 5, 2, 2], [10, 3, 3, 0]]])
hm = {'CTYPE1': 'WAVE ', 'CUNIT1': 'Angstrom', 'CDELT1': 0.2, 'CRPIX1': 0, 'CRVAL1': 10,
'NAXIS1': 4,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'HPLN-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.4, 'CRPIX3': 2, 'CRVAL3': 1, 'NAXIS3': 2}
wm = WCS(header=hm, naxis=3)
h_disordered = {
'CTYPE1': 'TIME ', 'CUNIT1': 'min', 'CDELT1': 0.4, 'CRPIX1': 0, 'CRVAL1': 0, 'NAXIS1': 2,
'CTYPE2': 'WAVE ', 'CUNIT2': 'Angstrom', 'CDELT2': 0.2, 'CRPIX2': 0, 'CRVAL2': 10,
'NAXIS2': 4,
'CTYPE3': 'HPLT-TAN', 'CUNIT3': 'deg', 'CDELT3': 0.5, 'CRPIX3': 2, 'CRVAL3': 0.5,
'NAXIS3': 3,
'CTYPE4': 'HPLN-TAN', 'CUNIT4': 'deg', 'CDELT4': 0.4, 'CRPIX4': 2, 'CRVAL4': 1, 'NAXIS4': 2}
w_disordered = WCS(header=h_disordered, naxis=4)
data_disordered = np.zeros((2, 3, 4, 2))
data_disordered[:, :, :, 0] = data
data_disordered[:, :, :, 1] = data
h_ordered = {
'CTYPE1': 'HPLN-TAN', 'CUNIT1': 'deg', 'CDELT1': 0.4, 'CRPIX1': 2, 'CRVAL1': 1, 'NAXIS1': 2,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'deg', 'CDELT2': 0.5, 'CRPIX2': 2, 'CRVAL2': 0.5,
'NAXIS2': 3,
'CTYPE3': 'WAVE ', 'CUNIT3': 'Angstrom', 'CDELT3': 0.2, 'CRPIX3': 0, 'CRVAL3': 10,
'NAXIS3': 4,
'CTYPE4': 'TIME ', 'CUNIT4': 'min', 'CDELT4': 0.4, 'CRPIX4': 0, 'CRVAL4': 0, 'NAXIS4': 2}
w_ordered = WCS(header=h_ordered, naxis=4)
data_ordered = np.zeros((2, 4, 3, 2))
data_ordered[0] = data.transpose()
data_ordered[1] = data.transpose()
h_rotated = {'CTYPE1': 'HPLN-TAN', 'CUNIT1': 'arcsec', 'CDELT1': 0.4, 'CRPIX1': 0,
'CRVAL1': 0, 'NAXIS1': 5,
'CTYPE2': 'HPLT-TAN', 'CUNIT2': 'arcsec', 'CDELT2': 0.5, 'CRPIX2': 0,
'CRVAL2': 0, 'NAXIS2': 5,
'CTYPE3': 'Time ', 'CUNIT3': 'seconds', 'CDELT3': 0.3, 'CRPIX3': 0,
'CRVAL3': 0, 'NAXIS3': 2,
'PC1_1': 0.714963912964, 'PC1_2': -0.699137151241, 'PC1_3': 0.0,
'PC2_1': 0.699137151241, 'PC2_2': 0.714963912964, 'PC2_3': 0.0,
'PC3_1': 0.0, 'PC3_2': 0.0, 'PC3_3': 1.0}
w_rotated = WCS(header=h_rotated, naxis=3)
data_rotated = np.array([[[1, 2, 3, 4, 6], [2, 4, 5, 3, 1], [0, -1, 2, 4, 2], [3, 5, 1, 2, 0]],
[[2, 4, 5, 1, 3], [1, 5, 2, 2, 4], [2, 3, 4, 0, 5], [0, 1, 2, 3, 4]]])
mask_cubem = data > 0
mask_cube = data >= 0
uncertaintym = data
uncertainty = np.sqrt(data)
mask_disordered = data_disordered > 0
uncertainty_disordered = data_disordered
mask_ordered = data_ordered > 0
uncertainty_ordered = data_ordered
cubem = NDCube(
data,
wm,
mask=mask_cubem,
uncertainty=uncertaintym,
extra_coords=[('time', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cube_disordered_inputs = (
data_disordered, w_disordered, mask_disordered, uncertainty_disordered,
[('spam', 0, u.Quantity(range(data_disordered.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data_disordered.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data_disordered.shape[2]), unit=u.pix))])
cube_disordered = NDCube(cube_disordered_inputs[0], cube_disordered_inputs[1],
mask=cube_disordered_inputs[2], uncertainty=cube_disordered_inputs[3],
extra_coords=cube_disordered_inputs[4])
cube_ordered = NDCubeOrdered(
data_ordered,
w_ordered,
mask=mask_ordered,
uncertainty=uncertainty_ordered,
extra_coords=[('spam', 3, u.Quantity(range(data_disordered.shape[0]), unit=u.pix)),
('hello', 2, u.Quantity(range(data_disordered.shape[1]), unit=u.pix)),
('bye', 1, u.Quantity(range(data_disordered.shape[2]), unit=u.pix))])
cube = NDCube(
data,
wt,
mask=mask_cube,
uncertainty=uncertainty,
missing_axis=[False, False, False, True],
extra_coords=[('time', 0, u.Quantity(range(data.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cubet = NDCube(
data,
wm,
mask=mask_cubem,
uncertainty=uncertaintym,
extra_coords=[('time', 0, np.array([datetime.datetime(2000, 1, 1)+datetime.timedelta(minutes=i)
for i in range(data.shape[0])])),
('hello', 1, u.Quantity(range(data.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data.shape[2]), unit=u.pix))])
cube_rotated = NDCube(
data_rotated,
w_rotated,
mask=mask_cube,
uncertainty=uncertainty,
missing_axis=[False, False, False],
extra_coords=[('time', 0, u.Quantity(range(data_rotated.shape[0]), unit=u.pix)),
('hello', 1, u.Quantity(range(data_rotated.shape[1]), unit=u.pix)),
('bye', 2, u.Quantity(range(data_rotated.shape[2]), unit=u.pix))])
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[:, 1],
NDCube,
mask_cubem[:, 1],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1)),
data[:, 1],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)}}
),
(cubem[:, 0:2],
NDCube,
mask_cubem[:, 0:2],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), slice(0, 2, None))),
data[:, 0:2],
u.Quantity((2, 2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)},
'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)}}
),
(cubem[:, :],
NDCube,
mask_cubem[:, :],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), slice(None, None, None))),
data[:, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 1],
NDCube,
mask_cubem[1, 1],
_wcs_slicer(wm, [False, False, False], (1, 1)),
data[1, 1],
u.Quantity((4, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 0:2],
NDCube,
mask_cubem[1, 0:2],
_wcs_slicer(wm, [False, False, False], (1, slice(0, 2, None))),
data[1, 0:2],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, :],
NDCube,
mask_cubem[1, :],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None))),
data[1, :],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 1],
NDCube,
mask_cube[:, 1],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1)),
uncertainty[:, 1],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 0:2],
NDCube,
mask_cube[:, 0:2],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), slice(0, 2, None))),
uncertainty[:, 0:2],
u.Quantity((2, 2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, :],
NDCube,
mask_cube[:, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None))),
uncertainty[:, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 1],
NDCube,
mask_cube[1, 1],
_wcs_slicer(wt, [True, False, False, False], (1, 1)),
uncertainty[1, 1],
u.Quantity((4, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 0:2],
NDCube,
mask_cube[1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, slice(0, 2, None))),
uncertainty[1, 0:2],
u.Quantity((2, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, :],
NDCube,
mask_cube[1, :],
     _wcs_slicer(wt, [True, False, False, False], (1, slice(None, None, None))),
uncertainty[1, :],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_second_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[1],
NDCube,
mask_cubem[1],
_wcs_slicer(wm, [False, False, False], 1),
data[1],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[0:2],
NDCube,
mask_cubem[0:2],
_wcs_slicer(wm, [False, False, False], slice(0, 2, None)),
data[0:2],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[:],
NDCube,
mask_cubem[:],
_wcs_slicer(wm, [False, False, False], slice(None, None, None)),
data[:],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[1],
NDCube,
mask_cube[1],
_wcs_slicer(wt, [True, False, False, False], 1),
uncertainty[1],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[0:2],
NDCube,
mask_cube[0:2],
_wcs_slicer(wt, [True, False, False, False], slice(0, 2, None)),
uncertainty[0:2],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:],
NDCube,
mask_cube[:],
_wcs_slicer(wt, [True, False, False, False], slice(None, None, None)),
uncertainty[:],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_first_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize(
"test_input,expected,mask,wcs,uncertainty,dimensions,world_axis_physical_types,extra_coords",
[(cubem[:, :, 1],
NDCube,
mask_cubem[:, :, 1],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), 1)),
data[:, :, 1],
u.Quantity((2, 3), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
      'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[:, :, 0:2],
NDCube,
mask_cubem[:, :, 0:2],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), slice(0, 2, None))),
data[:, :, 0:2],
u.Quantity((2, 3, 2), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[:, :, :],
NDCube,
mask_cubem[:, :, :],
_wcs_slicer(wm, [False, False, False],
(slice(None, None, None), slice(None, None, None), slice(None, None, None))),
data[:, :, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[:, 1, 1],
NDCube,
mask_cubem[:, 1, 1],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, 1)),
data[:, 1, 1],
u.Quantity((2, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lon']),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[:, 1, 0:2],
NDCube,
mask_cubem[:, 1, 0:2],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, slice(0, 2, None))),
data[:, 1, 0:2],
u.Quantity((2, 2), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[:, 1, :],
NDCube,
mask_cubem[:, 1, :],
_wcs_slicer(wm, [False, False, False], (slice(None, None, None), 1, slice(None, None, None))),
data[:, 1, :],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lon', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, :, 1],
NDCube,
mask_cubem[1, :, 1],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), 1)),
data[1, :, 1],
u.Quantity((3, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lat']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[1, :, 0:2],
NDCube,
mask_cubem[1, :, 0:2],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), slice(0, 2, None))),
data[1, :, 0:2],
u.Quantity((3, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[1, :, :],
NDCube,
mask_cubem[1, :, :],
_wcs_slicer(wm, [False, False, False], (1, slice(None, None, None), slice(None, None, None))),
data[1, :, :],
u.Quantity((3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cubem[1, 1, 1],
NDCube,
mask_cubem[1, 1, 1],
_wcs_slicer(wm, [False, False, False], (1, 1, 1)),
data[1, 1, 1],
u.Quantity((), unit=u.pix),
(),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cubem[1, 1, 0:2],
NDCube,
mask_cubem[1, 1, 0:2],
_wcs_slicer(wm, [False, False, False], (1, 1, slice(0, 2, None))),
data[1, 1, 0:2],
u.Quantity((2, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cubem[1, 1, :],
NDCube,
mask_cubem[1, 1, :],
_wcs_slicer(wm, [False, False, False], (1, 1, slice(None, None, None))),
data[1, 1, :],
u.Quantity((4, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cubem.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, :, 1],
NDCube,
mask_cube[:, :, 1],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), 1)),
uncertainty[:, :, 1],
u.Quantity((2, 3), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[:, :, 0:2],
NDCube,
mask_cube[:, :, 0:2],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), slice(0, 2, None))),
uncertainty[:, :, 0:2],
u.Quantity((2, 3, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[:, :, :],
NDCube,
mask_cube[:, :, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), slice(None, None, None), slice(None, None, None))),
uncertainty[:, :, :],
u.Quantity((2, 3, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'em.wl', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 2, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[:, 1, 1],
NDCube,
mask_cube[:, 1, 1],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1, 1)),
uncertainty[:, 1, 1],
u.Quantity((2, ), unit=u.pix),
tuple(['custom:pos.helioprojective.lat']),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[:, 1, 0:2],
NDCube,
mask_cube[:, 1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (slice(None, None, None), 1, slice(0, 2, None))),
uncertainty[:, 1, 0:2],
u.Quantity((2, 2), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[:, 1, :],
NDCube,
mask_cube[:, 1, :],
_wcs_slicer(wt, [True, False, False, False],
(slice(None, None, None), 1, slice(None, None, None))),
uncertainty[:, 1, :],
u.Quantity((2, 4), unit=u.pix),
('custom:pos.helioprojective.lat', 'time'),
{'time': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[0].value)), unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, :, 1],
NDCube,
mask_cube[1, :, 1],
_wcs_slicer(wt, [True, False, False, False], (1, slice(None, None, None), 1)),
uncertainty[1, :, 1],
u.Quantity((3, ), unit=u.pix),
tuple(['em.wl']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[1, :, 0:2],
NDCube,
mask_cube[1, :, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, slice(None, None, None), slice(0, 2, None))),
uncertainty[1, :, 0:2],
u.Quantity((3, 2), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[1, :, :],
NDCube,
mask_cube[1, :, :],
_wcs_slicer(wt, [True, False, False, False],
(1, slice(None, None, None), slice(None, None, None))),
uncertainty[1, :, :],
u.Quantity((3, 4), unit=u.pix),
('em.wl', 'time'),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[1].value)), unit=u.pix)},
'bye': {'axis': 1, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
),
(cube[1, 1, 1],
NDCube,
mask_cube[1, 1, 1],
_wcs_slicer(wt, [True, False, False, False], (1, 1, 1)),
uncertainty[1, 1, 1],
u.Quantity((), unit=u.pix),
(),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': None, 'value': u.Quantity(1, unit=u.pix)}}
),
(cube[1, 1, 0:2],
NDCube,
mask_cube[1, 1, 0:2],
_wcs_slicer(wt, [True, False, False, False], (1, 1, slice(0, 2, None))),
uncertainty[1, 1, 0:2],
u.Quantity((2, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(2), unit=u.pix)}}
),
(cube[1, 1, :],
NDCube,
mask_cube[1, 1, :],
     _wcs_slicer(wt, [True, False, False, False], (1, 1, slice(None, None, None))),
uncertainty[1, 1, :],
u.Quantity((4, ), unit=u.pix),
tuple(['time']),
{'time': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'hello': {'axis': None, 'value': u.Quantity(1, unit=u.pix)},
'bye': {'axis': 0, 'value': u.Quantity(range(int(cube.dimensions[2].value)), unit=u.pix)}}
)])
def test_slicing_third_axis(test_input, expected, mask, wcs, uncertainty,
dimensions, world_axis_physical_types, extra_coords):
assert isinstance(test_input, expected)
assert np.all(test_input.mask == mask)
helpers.assert_wcs_are_equal(test_input.wcs, wcs[0])
assert test_input.missing_axis == wcs[1]
assert test_input.uncertainty.array.shape == uncertainty.shape
assert np.all(test_input.dimensions.value == dimensions.value)
assert test_input.dimensions.unit == dimensions.unit
assert test_input.world_axis_physical_types == world_axis_physical_types
helpers.assert_extra_coords_equal(test_input.extra_coords, extra_coords)
@pytest.mark.parametrize("test_input", [(cubem)])
def test_slicing_error(test_input):
with pytest.raises(IndexError):
test_input[None]
with pytest.raises(IndexError):
test_input[0, None]
@pytest.mark.parametrize("test_input,expected", [
(cubem[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wm.wcs.crpix[2] - 1, 0)[-2]),
(cubem[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wm.wcs.crpix[2] - 1, 0)[0]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[-1]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[1]),
(cubem[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[2],
wm.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), 0)[0]),
(cube[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[1]),
(cube[1].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[2] - 1,
wt.wcs.crpix[3] - 1, 0)[0]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[0],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[2]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[1],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[1]),
(cube[0:2].pixel_to_world(*[
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix)
])[2],
wt.all_pix2world(
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix),
u.Quantity(np.arange(4), unit=u.pix), wt.wcs.crpix[3] - 1, 0)[0])])
def test_pixel_to_world(test_input, expected):
assert np.all(test_input.value == expected)
@pytest.mark.parametrize("test_input,expected", [
(cubem[1].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[1],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), wm.wcs.crpix[2] - 1, 0)[0]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[0],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), 0)[-1]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m)
])[1],
wm.all_world2pix(
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.deg),
u.Quantity(np.arange(4), unit=u.m), 0)[1]),
(cubem[0:2].world_to_pixel(*[
u.Quantity(np.arange(4), unit=u.deg),
        u.Quantity(np.arange(4), unit=u.m)
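# The snippet is truncated here. An assumed completion of this parametrize
# entry, following the pattern of the two entries above (index [2] compared
# against the first all_world2pix output):
#     ])[2],
#     wm.all_world2pix(
#         u.Quantity(np.arange(4), unit=u.deg),
#         u.Quantity(np.arange(4), unit=u.deg),
#         u.Quantity(np.arange(4), unit=u.m), 0)[0])])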
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import Isomap
from scipy.spatial.distance import pdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score, LeaveOneOut
RANDOM_STATE = 42
def calculate_pairwise_distances(df_for_Box_Plot_features, points, distance='euclidean'):
"""
    Compute pairwise distances between samples (Euclidean by default)
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
    distance : str
        distance metric; default is "euclidean"
Returns
----------
distance_original : nD array
euclidean distances in the original dataset
distance_embeddings : nD array
euclidean distances in the embedding
"""
distance_original = pdist(df_for_Box_Plot_features, metric=distance)
distance_embeddings = pdist(points, metric=distance)
return distance_original, distance_embeddings
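def _demo_pairwise_distances():
    # Illustrative usage only (shapes assumed): compare how well a random
    # 2-D "embedding" preserves distances from a random 5-D dataset.
    X = np.random.rand(50, 5)
    Y = np.random.rand(50, 2)
    d_orig, d_emb = calculate_pairwise_distances(X, Y)
    print(d_orig.shape, d_emb.shape)  # both in condensed form: (50 * 49 / 2,)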
def calculate_geodesic_distance(df_for_Box_Plot_features, points):
"""
    Compute pairwise geodesic distances
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
Returns
----------
geo_distance_original : nD array
geodesic distances in the original dataset
geo_distance_embeddings : nD array
geodesic distances in the embedding
"""
embedding = Isomap(n_components=2)
embedding.fit(df_for_Box_Plot_features)
    unsquareform = lambda a: a[np.nonzero(np.triu(a, 1))]
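    # The snippet is truncated at this point; the completion below is an
    # assumption. sklearn's Isomap stores the fitted geodesic distance matrix
    # in `dist_matrix_`; Euclidean distance is used within the 2-D embedding,
    # the usual proxy for geodesic distance in the embedded space.
    geo_distance_original = unsquareform(embedding.dist_matrix_)
    geo_distance_embeddings = pdist(points)
    return geo_distance_original, geo_distance_embeddings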
# This import you need
from models.adatk_model import ADAModel
# Everything else depends on what your model requires
import numpy as np
import wx
import array
import tempfile
import os.path
import os
try:
import Image
except ImportError: # Use alternate PIL module loading
from PIL import Image
import cv2
# All ADA Models must be a subclass of ADAModel
class CompositeADABasic1(ADAModel):
# All ADA Models must define the following information fields.
name = "composites ADA basic 1"
description = "Composites ADA - Basic Model 1"
authors = "Computational Tools and TRI/Austin, Inc."
version = "1.1"
url = "www.nditoolbox.com"
copyright = ""
def __init__(self):
ADAModel.__init__(self, self.name, self.description, self.inputdata, self.outputdata, self.indcalls,
self.indmetrics, self.inddata, self.params, self.settings)
def run(self):
"""Executes the ADA Model"""
# Example busy work
print("Input Data Configuration:")
for key, value in self.inputdata.iteritems():
print("\t{0}={1}".format(key, value))
print("\nOutput Data Configuration:")
for key, value in self.outputdata.iteritems():
print("\t{0}={1}".format(key, value))
print("\nIndication Calls Configuration:")
for key, value in self.indcalls.iteritems():
print("\t{0}={1}".format(key, value))
print("\nIndication Metrics Configuration:")
for key, value in self.indmetrics.iteritems():
print("\t{0}={1}".format(key, value))
print("\nIndication Data Configuration:")
for key, value in self.inddata.iteritems():
print("\t{0}={1}".format(key, value))
print("\nParameters Configuration:")
for key, value in self.params.iteritems():
print("\t{0}={1}".format(key, value))
print("\nSettings Configuration:")
for key, value in self.settings.iteritems():
print("\t{0}={1}".format(key, value))
print("\n")
############################################################################
self.select_filename()
filepath = self.filenm
#ext = ".rf"
if ".rf" in filepath:
self.load_rf()
#
self.para_a1 = 0.500 # %FSH or largest signal for frontwall / backwall calls = 64
self.para_a2 = 0.500 # %FSH for second signal threshold (making feature calls) = 51
self.para_a3 = 0.280 # %FSH for through thickness features (making feature calls) - defect features
self.para_a4 = 0.500 # %drop from FSH for backwall signal
self.para_t1 = 24 # time offset 1 - ringdown for front wall signal
self.para_t2 = 9 # time offset 2 - ringdown before back wall signal
self.para_c1 = 9 # 9 pixels in total area
self.para_c2 = 3 # 3 pixels wide
self.para_c3 = 3 # 3 pixels long
#
elif ".sdt" in filepath:
self.load_sdt()
#
self.para_a1 = 0.500 # %FSH or largest signal for frontwall / backwall calls = 64
self.para_a2 = 0.398 # %FSH for second signal threshold (making feature calls) = 51
self.para_a3 = 0.280 # %FSH for through thickness features (making feature calls) - defect features
self.para_a4 = 0.500 # %drop from FSH for backwall signal
self.para_t1 = 380 # time offset 1 - ringdown for front wall signal
self.para_t2 = 75 # time offset 2 - ringdown before back wall signal
self.para_c1 = 9 # 9 pixels in total area
self.para_c2 = 3 # 3 pixels wide
self.para_c3 = 3 # 3 pixels long
#
#else:
# return
#
Nx, Ny, Nt = self.inter_data.shape
#
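        # peak-to-peak amplitude per A-scan: max minus min over the time axis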
datatmp_1mx = self.inter_data.max(2)
datatmp_1mn = self.inter_data.min(2)
datatmp_1 = datatmp_1mx - datatmp_1mn
#
data_1 = datatmp_1.astype('f')
#data_1 = data_1 - 128.0
#
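        # mid time-of-flight per A-scan: midpoint of the peak and trough sample indices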
datatmp_2 = self.inter_data.argmax(2) + self.inter_data.argmin(2)
data_2 = 0.5*datatmp_2.astype('f')
########################################
# evaluate mean A-scan signal
t = np.array([np.arange(0, Nt, 1)])
datatmp_5 = self.inter_data.mean(0)
datatmp_6 = datatmp_5.mean(0)
ta = np.zeros((2,Nt))
for idx in range(Nt):
ta[0,idx] = t[0,idx]
ta[1,idx] = datatmp_6[idx]
#
#
# step 1a) call all transitions
datatmp1mxa = datatmp_1mx.max(0)
datatmp1mx = datatmp1mxa.max() - 128.0
#
self.para1x = round(self.para_a1*datatmp1mx)
self.para2x = round(self.para_a2*datatmp1mx)
self.para3x = round(2.0*self.para_a3*datatmp1mx)
self.para4x = round(self.para_a4*datatmp1mx)
#
data_m1a = np.zeros((Nx,Ny)) # near surface map - TOF (1st cross)
data_m1t = np.zeros((Nx,Ny)) # near surface map - AMP (global)
data_m2a = np.zeros((Nx,Ny)) # near surface map - AMP
data_m2b = np.zeros((Nx,Ny)) # near surface map - TOF (1st cross)
data_m2t = np.zeros((Nx,Ny)) # near surface map - TOF (peak)
data_m3a = np.zeros((Nx,Ny)) # near surface map - AMP
data_m3t = np.zeros((Nx,Ny)) # near surface map - TOF
data_m4a = np.zeros((Nx,Ny)) # near surface map - AMP
#
data_t0 = np.arange(Nt) # extract time step vector 1
for i1 in range(Nx):
for j1 in range(Ny):
data_t1 = self.inter_data[i1,j1,:] - 128.0 # extract signal vector 1
#data_t1 = data_tx.astype('f') - 128.0
data_t1b = data_t1 > self.para1x # evaluate threshold vector 1
data_tmp = data_t1[data_t1b] # (store first amplitude signal that exceeds threshold)
if data_tmp.size:
data_m1a[i1,j1] = data_tmp[0] # save - amplitude map 1
data_tmp = data_t0[data_t1b] # (store first time step that exceeds threshold)
i_a = data_tmp[0]
data_m1t[i1,j1] = i_a # save - TOF map 1
#
data_t2 = data_t1[(i_a+self.para_t1):] # extract vector 2 - thickness and backwall signals
data_m2a[i1,j1] = data_t2.max() # save - amplitude map 2
data_m2b[i1,j1] = data_t2.argmax() # save - amplitude map 2
#
data_t02 = data_t0[(i_a+self.para_t1):] # extract time step vector 2
data_t2b = data_t2 > self.para2x # evaluate threshold vector 2
data_tmp = data_t02[data_t2b]
if data_tmp.size:
i_b = data_tmp[0]
data_m2t[i1,j1] = i_b # save - amplitude map 1
#data_median1_iny = np.zeros(Nx)
data_median2_iny = np.zeros(Nx)
#data_median1_inx = np.zeros(Ny)
data_median2_inx = np.zeros(Ny)
for i1 in range(Nx):
#data_median1_iny[i1] = np.median(data_m1t[i1,:])
data_median2_iny[i1] = np.median(data_m2t[i1,:])
#
for j1 in range(Ny):
#data_median1_inx[j1] = np.median(data_m1t[:,j1])
data_median2_inx[j1] = np.median(data_m2t[:,j1])
#
#data_median1_g = np.median(data_median1_inx)
data_median2_g = np.median(data_median2_inx)
for i1 in range(Nx):
for j1 in range(Ny):
data_t1 = self.inter_data[i1,j1,:] - 128.0 # extract signal vector 1
i_a = int(data_m1t[i1,j1] + self.para_t1)
i_b = int(data_median2_iny[i1] + data_median2_inx[j1] - data_median2_g - self.para_t1)
if i_b <= i_a:
i_b = data_median2_g - self.para_t1
if i_a >= i_b:
i_a = self.para_t1
i_c = i_b + 1
if i_c >= Nt:
i_c = Nt - 1
#print("\ti_a:{0}".format(i_a))
#print("\ti_b:{0}".format(i_b))
#print("\ti_c:{0}".format(i_c))
#
data_t2 = data_t1[i_a:i_b] # extract vector 3 - thickness only
data_m3a[i1,j1] = data_t2.max()-data_t2.min() # save - pp amplitude map 3 (thickness only)
data_m3t[i1,j1] = 0.5*(data_t2.argmax()+data_t2.argmin()) # save - TOF map 3 (thickness only)
#
data_t3 = data_t1[i_c:] # extract vector 2 - thickness and backwall signals
data_m4a[i1,j1] = data_t3.max() # save - amplitude map 4 (backwall only)
# through backwall drop threshold
data1 = (data_m4a < self.para4x)
data_f1 = data1.astype('f')
# through thickness threshold
data1 = (data_m3a > self.para3x)
data_f2 = data1.astype('f')
# Call ADA code - Step 2 (evaluate regions that match call criteria)
self.tmp_data = data_m3a
self.tmp_data2 = data_m4a
self.on_ada_1()
Nr = int(self.nb)
#
ikey_tmp = []
for ikey, ivalues in sorted(self.indcalls.iteritems()):
ikey_tmp.append(ikey)
Nc1 = len(ikey_tmp)
ikey_tmp = []
for ikey, ivalues in sorted(self.indmetrics.iteritems()):
ikey_tmp.append(ikey)
Nc2 = len(ikey_tmp)
Nc = Nc1 + Nc2
#
if int(self.nb) == 0:
Nr = 1
self.nb = 1
idx1 = 0
self.feat_bx.append(1)
self.feat_by.append(1)
self.feat_bw.append(1)
self.feat_bh.append(1)
#
model_data = np.zeros((Nr,Nc),'float')
model_data[idx1,0] = 0.0
model_data[idx1,1] = 0.0
model_data[idx1,2] = 0.0
model_data[idx1,3] = 0.0
model_data[idx1,4] = 0.0
model_data[idx1,5] = 0.0
#
model_data[idx1,6] = 0.0
model_data[idx1,7] = 0.0
model_data[idx1,8] = 0.0
#
model_data[idx1,9] = 0.0
model_data[idx1,10] = 0.0
model_data[idx1,11] = 0.0
else:
model_data = np.zeros((Nr,Nc),'float')
for idx1 in range(Nr):
model_data[idx1,0] = self.call[idx1]
model_data[idx1,1] = self.feat_cx[idx1]
model_data[idx1,2] = self.feat_cy[idx1]
model_data[idx1,3] = self.feat_a1[idx1]
model_data[idx1,4] = self.feat_bw[idx1]
model_data[idx1,5] = self.feat_bh[idx1]
#
                j1 = int(np.round(self.feat_cx[idx1]))
                i1 = int(np.round(self.feat_cy[idx1]))
#
model_data[idx1,0] = 2.0*data_f1[i1,j1] + data_f2[i1,j1]
model_data[idx1,6] = data_m3t[i1,j1]
model_data[idx1,7] = data_m3a[i1,j1]
model_data[idx1,8] = data_m4a[i1,j1]
#
model_data[idx1,9] = self.feat_bx[idx1]
model_data[idx1,10] = self.feat_by[idx1]
model_data[idx1,11] = self.feat_a2[idx1]
#self.populate_spreadsheet(self.view.output_grid, model.data)
#self.populate_spreadsheet(self.view.output_grid2, model.data)
#self.view.spreadsheet_nb.ChangeSelection(self.view.res_summary_page)
# Store in res_outputdata
model_res_outputdata = []
model_res_outputdata.append(data_m2a)
model_res_outputdata.append(data_m2t)
model_res_outputdata.append(data_m3a)
model_res_outputdata.append(data_m3t)
model_res_outputdata.append(data_m4a)
model_res_outputdata.append(data_m1t)
model_res_outputdata.append(data_f1)
model_res_outputdata.append(data_f2)
model_res_outputdata.append(ta)
#
model_res_inddata1 = []
model_res_inddata2 = []
model_res_inddata3 = []
model_res_inddata4 = []
for idx1 in range(Nr):
            ix0 = int(np.round(self.feat_cx[idx1]))
            iy0 = int(np.round(self.feat_cy[idx1]))
            ix1 = int(np.round(self.feat_bx[idx1]))
            iy1 = int(np.round(self.feat_by[idx1]))
            ix2 = ix1 + 1 + int(np.round(self.feat_bw[idx1]))
__author__ = 'mason'
from domain_orderFulfillment import *
from timer import DURATION
from state import state
import numpy as np
'''
This is a randomly generated problem
'''
def GetCostOfMove(id, r, loc1, loc2, dist):
return 1 + dist
def GetCostOfLookup(id, item):
    return max(1, np.random.beta(2, 2))
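# Illustrative sanity check (not part of the generated problem): the move cost
# grows linearly with distance, and the lookup cost never falls below 1.
def _demo_costs():
    assert GetCostOfMove(0, 'r1', 'loc0', 'loc1', 3.0) == 4.0
    assert GetCostOfLookup(0, 'item0') >= 1  # beta(2, 2) draws lie in (0, 1)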
import numpy as np
from itertools import combinations_with_replacement
from mchap.combinatorics import count_unique_genotypes
from mchap.assemble.likelihood import log_likelihood
from mchap.assemble.prior import log_genotype_prior
from mchap.jitutils import normalise_log_probs
__all__ = ["snp_posterior"]
def snp_posterior(reads, position, n_alleles, ploidy, inbreeding=0, read_counts=None):
"""Brute-force the posterior probability across all possible
genotypes for a single SNP position.
Parameters
----------
reads : ndarray, float, shape (n_reads, n_positions, max_allele)
Reads encoded as probability distributions.
position : int
Position of target SNP within reads.
n_alleles : int
Number of possible alleles for this SNP.
ploidy : int
Ploidy of organism.
inbreeding : float
Expected inbreeding coefficient of organism.
read_counts : ndarray, int, shape (n_reads, )
Count of each read.
Returns
-------
genotypes : ndarray, int, shape (n_genotypes, ploidy)
SNP genotypes.
probabilities : ndarray, float, shape (n_genotypes, )
Probability of each genotype.
"""
n_reads, n_pos, max_allele = reads.shape
if n_reads == 0:
# handle no reads
n_reads = 1
reads = np.empty((n_reads, n_pos, max_allele), dtype=float)
reads[:] = np.nan
u_gens = count_unique_genotypes(n_alleles, ploidy)
genotypes = np.zeros((u_gens, ploidy), dtype=np.int8) - 1
log_probabilities = np.empty(u_gens, dtype=float)
log_probabilities[:] = -np.inf
alleles = np.arange(n_alleles)
for j, genotype in enumerate(combinations_with_replacement(alleles, ploidy)):
        genotype = np.array(genotype)
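        # The snippet is truncated here. A plausible completion, based on the
        # imports at the top of this file (the mchap call signatures below are
        # assumptions, not verified):
        #     genotypes[j] = genotype
        #     lprior = log_genotype_prior(genotype, n_alleles, inbreeding=inbreeding)
        #     llk = log_likelihood(reads[:, position:position + 1, :], genotype[:, None], read_counts=read_counts)
        #     log_probabilities[j] = lprior + llk
        # and, after the loop:
        #     probabilities = normalise_log_probs(log_probabilities)
        #     return genotypes, probabilities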
import matplotlib.pyplot as plt
import numpy as np
import sys
import csv
time = []
true_pos = []
est_pos = []
est_pos_std_dev = []
true_vel = []
est_vel = []
est_vel_std_dev = []
true_alt = []
est_alt = []
est_alt_std_dev = []
true_climb = []
est_climb = []
est_climb_std_dev = []
with open(sys.argv[1],'r') as csv_file:
plots = csv.reader(csv_file, delimiter=',')
headers = next(plots, None)
for row in plots:
time.append(float(row[0]))
true_pos.append(float(row[1]))
est_pos.append(float(row[2]))
est_pos_std_dev.append(float(row[3]))
true_vel.append(float(row[4]))
est_vel.append(float(row[5]))
est_vel_std_dev.append(float(row[6]))
true_alt.append(float(row[7]))
est_alt.append(float(row[8]))
est_alt_std_dev.append(float(row[9]))
true_climb.append(float(row[10]))
est_climb.append(float(row[11]))
est_climb_std_dev.append(float(row[12]))
fig, axes = plt.subplots(4, 1)
fig.suptitle("Airplane Tracking Estimation")
axes[0].plot(time, true_pos, "k-", time, est_pos, "b-", \
time, np.array(est_pos) + np.array(est_pos_std_dev), "g--", \
time, np.array(est_pos) - np.array(est_pos_std_dev), "g--")
axes[0].set(xlabel="Time [s]", ylabel="Position [m]")
axes[1].plot(time, true_vel, "k-", time, est_vel, "b-", \
time, np.array(est_vel) + np.array(est_vel_std_dev), "g--", \
time, np.array(est_vel) - np.array(est_vel_std_dev), "g--")
axes[1].set(xlabel="Time [s]", ylabel="Velocity [m/s]")
axes[2].plot(time, true_alt, "k-", time, est_alt, "b-", \
time, np.array(est_alt) + np.array(est_alt_std_dev), "g--", \
time, np.array(est_alt) - np.array(est_alt_std_dev), "g--")
axes[2].set(xlabel="Time [s]", ylabel="Altitude [m]")
axes[3].plot(time, true_climb, "k-", time, est_climb, "b-", \
             time, np.array(est_climb) + np.array(est_climb_std_dev), "g--", \
             time, np.array(est_climb) - np.array(est_climb_std_dev), "g--")
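# Assumed tail of the truncated script, mirroring the three panels above.
axes[3].set(xlabel="Time [s]", ylabel="Climb Rate [m/s]")
plt.show()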
from __future__ import print_function
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
import sys
import scipy.ndimage as ndimage
import scipy.interpolate as si
import contmaker as cm
#Takes in an array of chi-squared test results and plots them as a function of the
#sine squared theta values used to get the results. dms is fixed.
def chi2vssst(chi2_array,sst_array,oscParams):
opacity = 0.9
fig, ax = plt.subplots()
#plt.gcf().subplots_adjust(bottom=0.2)
plt.plot(sst_array, chi2_array, alpha=opacity, color='r')
plt.xlabel('Sine-squared theta 12')
plt.ylabel(r'chi-squared')
    plt.title(r'Chi-squared value between a statistically fluctuated SNO+ ' + \
              'spectrum (dms = {0}, sst = {1}) '.format(str(oscParams[0]), str(oscParams[1])) + \
              'and a non-fluctuated spectrum\n with ' + \
              'dms = {0} and the x-axis sst value.'.format(oscParams[0]))
#plt.xticks(index + bar_width, x, y=0.001)
#plt.legend()
#plt.tight_layout() #could use instead of the subplots_adjust line
plt.show()
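def _demo_chi2vssst():
    # Illustrative usage with made-up chi-squared values and KamLAND-like
    # oscillation parameters.
    ssts = np.linspace(0.25, 0.40, 16)
    chi2s = 8.0 + 40.0 * (ssts - 0.316) ** 2
    chi2vssst(chi2s, ssts, [7.54e-05, 0.316])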
def chi2contour(DeltaMSqs,sst12s,chisqs):
opacity = 0.9
fig = plt.figure()
ax = fig.add_subplot(1,2,1)#,projection='2d')#3d')
#ax.hexbin(sst12s,DeltaMSqs,chisqs)#,color='b',marker = 'o',alpha=opacity)
#ax.plot_surface(sst12s, DeltaMSqs, chisqs)
cont = ax.contourf(sst12s, DeltaMSqs, chisqs)
#ax.annotate(r'$\sin^{2}(\theta _{12})$ =' + str(sst12) + '\n' + \
# r'$\Delta m^{2}_{21}$ = ' + str(m12), xy=(7,40), fontsize = '16',
# xytext=(6.5,40))
ax.set_xlabel('Sine-squared Theta 12')
ax.set_ylabel(r'Delta M-Squared')
ax.set_title(r'Chi-squared map of experiment')
ax2= fig.add_subplot(1,2,2)
Z2 = ndimage.gaussian_filter(chisqs, sigma=1.0, order=0)
ax2.imshow(Z2)
ax2.set_xlabel('Sine-squared Theta 12')
ax2.set_ylabel(r'Delta M-Squared')
ax2.set_title(r'Chi-squared map of experiment')
fig.colorbar(cont,shrink=0.5, aspect=5)
plt.show()
def chi2CLs(data1):
'''
Takes in a data set, plots the delta m-squared and sine-squared
theta values, and plots their 68.3% and 90% CLs on the same plot.
The CLs are calculated in slices and the region between each point is
interpolated.
'''
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# ax.plot(data1['sst'], data1['dms'], 'ro', alpha=0.7, color='b', \
# label='Best fits, universe is' + data1['Params'],zorder=1)
if data1['Params'] == 'KAMLAND':
ax.plot(0.316,7.54E-05, '*', markersize=20, alpha=0.7, color='w', markeredgecolor='b', label = 'KL Values')
avgsst = np.average(data1['sst'])
avgdms = np.average(data1['dms'])
ax.plot(avgsst, avgdms, '*', markersize=20, alpha=0.7, color='r', label = 'Mean of fits',zorder=2)
CL68_sst,CL68_dms = cm.getcontourlines(0.683,120,data1,[avgsst,avgdms])
CL90_sst,CL90_dms = cm.getcontourlines(0.90,120,data1,[avgsst,avgdms])
#tsk = si.splprep(68CL_sst,68CL_dms,s=0)
ax.plot(CL68_sst, CL68_dms, color='blue', label = '68.3% CL')
ax.plot(CL90_sst, CL90_dms, color='purple', label = '90% CL')
ax.set_xlim(0.20,0.55)
ax.set_ylim(0.000055,0.000090)
ax.set_xlabel(r'$\sin^{2}(\theta_{12})$')
ax.set_ylabel(r'$\Delta m^{2}_{12} (ev^{2})$')
ax.set_title('Scatter plot of best-fit oscillation parameters')
ax.grid(True)
box = ax.get_position()
#shrink the graph a bit so the legend fits
ax.set_position([box.x0,box.y0,box.width*0.75, box.height])
plt.legend(loc = 'center left', bbox_to_anchor=(1,0.5))
plt.show()
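def _demo_chi2CLs():
    # Illustrative usage only: chi2CLs (and chi2scatter below) expect a dict
    # of best-fit arrays plus a 'Params' tag naming the seed universe. The
    # scatter of fits here is synthetic.
    fits = {'sst': np.random.normal(0.316, 0.02, 500),
            'dms': np.random.normal(7.54e-05, 4e-06, 500),
            'Params': 'KAMLAND'}
    chi2CLs(fits)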
def chi2scatter(data1):
'''
Takes in a data set, plots the delta m-squared and sine-squared
theta values, and plots them along with their density contours.
'''
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
    ax.plot(data1['sst'], data1['dms'], 'ro', alpha=0.7, color='b', \
            label='Best fits, universe is ' + data1['Params'], zorder=1)
if data1['Params'] == 'KAMLAND':
ax.plot(0.316,7.54E-05, '*', markersize=20, alpha=0.7, color='w', markeredgecolor='b', label = '(1): KL parameters')
#Now, plot a density contour on top
hrange = [[0.20,0.50],[0.00002,0.0003]]
H, xedges, yedges = np.histogram2d(data1['sst'],data1['dms'],range=hrange,bins=30)
H=np.transpose(H) #Zero point is at top right
#xedges, yedges = np.meshgrid(xedges[:-1],yedges[:-1])
extent = [0.20, 0.50, 0.00002, 0.0003] #xedges[0],xedges[-1],yedges[0],yedges[-1]]
CT = ax.contour(H, extent=extent, origin="lower",linewidths=4,zorder=4)
ax.plot(np.average(data1['sst']), np.average(data1['dms']), '*', markersize=20, alpha=0.7, color='r', label = 'Fit avg.',zorder=2)
ax.plot(np.median(data1['sst']), np.median(data1['dms']), '*', markersize=20, alpha=0.7, color='k', label = 'median avg.',zorder=3)
ax.set_xlim(0.20,0.50)
ax.set_ylim(0.00002,0.00030)
ax.set_xlabel(r'$\sin^{2}(\theta_{12})$')
ax.set_ylabel(r'$\Delta m^{2}_{12} (ev^{2})$')
ax.set_title('Scatter plot of best-fit oscillation parameters')
ax.grid(True)
box = ax.get_position()
#shrink the graph a bit so the legend fits
ax.set_position([box.x0,box.y0,box.width*0.75, box.height])
plt.legend(loc = 'center left', bbox_to_anchor=(1,0.5))
plt.colorbar(CT,shrink=0.8, extend='both')
plt.show()
def chi2scatter_2sets(data1, data2,oscParamsSeed1,oscParamsSeed2):
'''
Takes in an array of sine-squared theta values and delta-m squared values
from performing a chi-squared minimization between the SNO+ event spectrum
with oscillation parameters oscParamsSeed = [dms, sst] and the same spectrum
with poisson fluctuations.
'''
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(data1['sst_fits'], data1['dms_fits'], 'ro', alpha=0.7, color='b', label='Best fits to seed (1)')
ax.plot(data2['sst_fits'], data2['dms_fits'], 'ro', alpha=0.7, color='g', label='Best fits to seed (2)')
ax.plot(oscParamsSeed1[1], oscParamsSeed1[0], '*', markersize=20, alpha=0.7, color='w', markeredgecolor='b', label = '(1): KL parameters')
ax.plot(oscParamsSeed2[1], oscParamsSeed2[0], '*', markersize=20, alpha=0.7, color='w', markeredgecolor='g', label = '(2): SK parameters')
ax.plot(np.average(data1['sst_fits']), np.average(data1['dms_fits']), '*', markersize=20, alpha=0.7, color='r', label = 'Fit avg. seed (1)')
    ax.plot(np.average(data2['sst_fits']), np.average(data2['dms_fits']), '*', markersize=20, alpha=0.7, color='k', label = 'Fit avg. seed (2)')
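    # The function is truncated here, and the marker style of the seed-(2)
    # average above is also an assumption. A plausible tail, mirroring
    # chi2scatter above:
    #     ax.set_xlabel(r'$\sin^{2}(\theta_{12})$')
    #     ax.set_ylabel(r'$\Delta m^{2}_{12} (ev^{2})$')
    #     ax.grid(True)
    #     plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    #     plt.show()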
#!/usr/bin/env python
import sys
sys.path.append(r'C:\Program Files (x86)\Keysight\SD1\Libraries\Python')
from BaseDriver import LabberDriver, Error, IdError
import keysightSD1
import numpy as np
import os
import time
class Driver(LabberDriver):
""" This class implements the Keysight PXI digitizer"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# number of demod blocks in the FPGA
self.num_of_demods = 5
# self.demod_n_pts = self.num_of_demods * 15
self.demod_n_pts = 80
self.bit_stream_name = ''
# set time step and resolution
self.nBit = 16
self.bitRange = float(2**(self.nBit - 1) - 1)
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# get PXI chassis
self.chassis = int(self.dComCfg.get('PXI chassis', 1))
# create AWG instance
self.dig = keysightSD1.SD_AIN()
AWGPart = self.dig.getProductNameBySlot(
self.chassis, int(self.comCfg.address))
self.log('Serial:', self.dig.getSerialNumberBySlot(
self.chassis, int(self.comCfg.address)))
if not isinstance(AWGPart, str):
raise Error('Unit not available')
# check that model is supported
dOptionCfg = self.dInstrCfg['options']
for validId, validName in zip(
dOptionCfg['model_id'], dOptionCfg['model_str']):
if AWGPart.find(validId) >= 0:
# id found, stop searching
break
else:
# loop fell through, raise ID error
raise IdError(AWGPart, dOptionCfg['model_id'])
# set model
self.setModel(validName)
# sampling rate and number of channles is set by model
if validName in ('M3102', 'M3302'):
# 500 MHz models
self.dt = 2E-9
self.nCh = 4
else:
# assume 100 MHz for all other models
self.dt = 10E-9
self.nCh = 4
# create list of sampled data
self.lTrace = [np.array([])] * self.nCh
self.demod_output_ssb = np.zeros((0,), dtype='complex')
self.demod_buffer = np.zeros((0,), dtype=np.int16)
self.dig.openWithSlot(AWGPart, self.chassis, int(self.comCfg.address))
# get hardware version - changes numbering of channels
hw_version = self.dig.getHardwareVersion()
if hw_version >= 4:
# KEYSIGHT - channel numbers start with 1
self.ch_index_zero = 1
else:
# SIGNADYNE - channel numbers start with 0
self.ch_index_zero = 0
self.log('HW:', hw_version)
self.configure_FPGA()
def configure_FPGA(self, reset=False):
"""Load FPGA bitstream and setup triggers"""
self.fpga_config = self.getValue('FPGA Hardware')
if reset or self.fpga_config == 'Only signals':
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Clean_2018-05-31T22_22_11.sbp')
elif self.fpga_config in ('FPGA I/Q and signals', 'Only FPGA I/Q'):
bitstream = os.path.join(
os.path.dirname(__file__),
'firmware_FPGAFlow_Demod_v4_IQx5_2018-09-02T19_14_50.sbp')
# don't reload if correct bitstream is already loaded
if bitstream == self.bit_stream_name:
return
if (self.dig.FPGAload(bitstream)) < 0:
if self.fpga_config != 'Only signals':
raise Error('FPGA not loaded, check FPGA version...')
self.bit_stream_name = bitstream
if self.fpga_config != 'Only signals':
for n in range(self.num_of_demods):
LO_freq = self.getValue('LO freq %d' % (n + 1))
self.setFPGALOfreq(n + 1, LO_freq)
self.setFPGATrigger()
def getHwCh(self, n):
"""Get hardware channel number for channel n. n starts at 0"""
return n + self.ch_index_zero
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was called with an error
try:
# flush all memory
for n in range(self.nCh):
self.log('Close ch:', n, self.dig.DAQflush(self.getHwCh(n)))
# remove firmware
self.configure_FPGA(reset=True)
# close instrument
self.dig.close()
except Exception:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# start with setting local quant value
quant.setValue(value)
# if changing FPGA operation, reload firmware
if quant.name == 'FPGA Hardware':
new_value = self.getValue('FPGA Hardware')
# only reload if operation mode changed
if new_value != self.fpga_config:
self.configure_FPGA()
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
# proceed depending on command
if quant.name in ('External Trig Source', 'External Trig Config',
'Trig Sync Mode'):
extSource = int(self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(0, extSource, trigBehavior, sync)
elif quant.name in ('Trig I/O', ):
# get direction and sync from index of comboboxes
direction = int(self.getCmdStringFromValue('Trig I/O'))
self.dig.triggerIOconfig(direction)
elif quant.name in (
'Analog Trig Channel', 'Analog Trig Config', 'Trig Threshold'):
# get trig channel
trigCh = self.getValueIndex('Analog Trig Channel')
mod = int(self.getCmdStringFromValue('Analog Trig Config'))
threshold = self.getValue('Trig Threshold')
self.dig.channelTriggerConfig(self.getHwCh(trigCh), mod, threshold)
elif name in ('Range', 'Impedance', 'Coupling'):
# set range, impedance, coupling at once
rang = self.getRange(ch)
imp = int(self.getCmdStringFromValue('Ch%d - Impedance' % (ch + 1)))
coup = int(self.getCmdStringFromValue('Ch%d - Coupling' % (ch + 1)))
self.dig.channelInputConfig(self.getHwCh(ch), rang, imp, coup)
# FPGA configuration
if quant.name.startswith('LO freq'):
demod_num = int(quant.name[-1])
LO_freq = self.getValue('LO freq ' + str(demod_num))
value = self.setFPGALOfreq(demod_num, LO_freq)
elif quant.name in ('Skip time', 'Integration time'):
self.setFPGATrigger()
return value
def performGetValue(self, quant, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# check if channel-specific, if so get channel + name
if quant.name.startswith('Ch') and len(quant.name) > 6:
ch = int(quant.name[2]) - 1
name = quant.name[6:]
else:
ch, name = None, ''
if (quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
demod_num = int(quant.name[-1]) - 1
if (name == 'Signal' or quant.name.startswith('FPGA Voltage') or
quant.name.startswith('FPGA Single-shot')):
if self.isHardwareLoop(options):
"""Get data from round-robin type averaging"""
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# acquisition was started when arming, just read data
if name == 'Signal':
return quant.getTraceDict(
self.reshaped_traces[ch][seq_no], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
return self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
return quant.getTraceDict(
self.demod_output_vector_I[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
return self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
return quant.getTraceDict(
self.demod_output_vector_Q[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
return quant.getTraceDict(
self.demod_output_vector_ref[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num][seq_no], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
return self.demod_output_ssb[demod_num, :, seq_no].mean()
elif quant.name.startswith('FPGA Single-shot,'):
return quant.getTraceDict(
self.demod_output_ssb[demod_num, :, seq_no],
dt=1)
# get traces if first call
if self.isFirstCall(options):
# don't arm and measure if in arm/trig mode, was done at arm
if not self.isHardwareTrig(options):
self.getTraces()
# return correct data
if name == 'Signal':
value = quant.getTraceDict(self.lTrace[ch], dt=self.dt)
elif quant.name.startswith('FPGA Voltage I,'):
value = self.demod_output_I[demod_num]
elif quant.name.startswith('FPGA Single-shot I,'):
value = quant.getTraceDict(
self.demod_output_vector_I[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage Q,'):
value = self.demod_output_Q[demod_num]
elif quant.name.startswith('FPGA Single-shot Q,'):
value = quant.getTraceDict(
self.demod_output_vector_Q[demod_num], dt=1)
elif quant.name.startswith('FPGA Single-shot REF,'):
value = quant.getTraceDict(
self.demod_output_vector_ref[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage NP,'):
return self.demod_output_NP[demod_num]
elif quant.name.startswith('FPGA Single-shot NP,'):
return quant.getTraceDict(
self.demod_output_vector_NP[demod_num], dt=1)
elif quant.name.startswith('FPGA Voltage,'):
value = np.mean(self.demod_output_ssb[demod_num])
elif quant.name.startswith('FPGA Single-shot,'):
# if no records, don't average over number of averages
if self.demod_output_ssb.shape[2] <= 1:
value = quant.getTraceDict(
self.demod_output_ssb[demod_num, :, 0], dt=1)
else:
# records are being used, average over number of averages
value = quant.getTraceDict(
self.demod_output_ssb[demod_num].mean(0), dt=1)
else:
# for all others, return local value
value = quant.getValue()
return value
def performArm(self, quant_names, options={}):
"""Perform the instrument arm operation"""
# only arm digitizer if about to measure read-only values
for name in quant_names:
quant = self.getQuantity(name)
if quant.isPermissionRead():
break
else:
# loop fell through, no read-only quantity, don't arm
return
# arm by calling get traces
if self.isHardwareLoop(options):
# in hardware looping, number of records is set by the looping
(seq_no, n_seq) = self.getHardwareLoopIndex(options)
# show status before starting acquisition
self.reportStatus('Digitizer - Waiting for signal')
# get data
self.getTraces(bArm=True, bMeasure=False, n_seq=n_seq)
# report arm completed, to allow client to continue
self.report_arm_completed()
# directly start collecting data (digitizer buffer is limited)
self.getTraces(bArm=False, bMeasure=True, n_seq=n_seq)
# after measurement is done, re-shape data and place in buffer
self.reshaped_traces = []
for trace in self.lTrace:
if len(trace) > 0:
trace = trace.reshape((n_seq, trace.size // n_seq))
self.reshaped_traces.append(trace)
else:
self.getTraces(bArm=True, bMeasure=False)
# report arm completed, to allow client to continue
self.report_arm_completed()
self.getTraces(bArm=False, bMeasure=True)
def getTraces(self, bArm=True, bMeasure=True, n_seq=0):
"""Get all active traces"""
# # test timing
# import time
# t0 = time.clock()
# lT = []
# find out which traces to get
lCh = []
iChMask = 0
for n in range(self.nCh):
if self.fpga_config == 'Only signals':
# normal operation
if self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.append(n)
iChMask += 2**n
elif self.fpga_config == 'FPGA I/Q and signals':
# mixed signal/demod, always enable ch 4 (used for demod)
if (n == 3) or self.getValue('Ch%d - Enabled' % (n + 1)):
lCh.append(n)
iChMask += 2**n
elif self.fpga_config == 'Only FPGA I/Q':
# if only fpga demod, don't read any AWGs but ch 4 (demod)
if n == 3:
lCh.append(n)
iChMask += 2**n
else:
continue
# get current settings
if self.fpga_config in ('Only signals', 'FPGA I/Q and signals'):
nPts = int(self.getValue('Number of samples'))
elif self.fpga_config == 'Only FPGA I/Q':
nPts = self.demod_n_pts
nCyclePerCall = int(self.getValue('Records per Buffer'))
# in hardware loop mode, ignore records and use number of sequences
if n_seq > 0:
nSeg = n_seq
else:
nSeg = int(self.getValue('Number of records'))
nAv = int(self.getValue('Number of averages'))
# trigger delay is in 1/sample rate
nTrigDelay = int(round(self.getValue('Trig Delay') / self.dt))
# special high-speed FPGA mode, don't convert, just transfer
if (self.fpga_config == 'Only FPGA I/Q' and
self.getValue('Hide I/Q') and
not self.getValue('Convert data while streaming')):
only_transfer_fgpa = True
else:
only_transfer_fgpa = False
if bArm:
# clear old data
self.dig.DAQflushMultiple(iChMask)
self.lTrace = [np.array([])] * self.nCh
self.smsb_info_str = []
self.demod_counter = 0
# only re-allocate large output matrix if necessary (slow)
if self.demod_output_ssb.size != (self.num_of_demods * nSeg * nAv):
self.demod_output_ssb = np.zeros(
(self.num_of_demods, nSeg * nAv), dtype='complex')
else:
# matrix has right size, just reshape
self.demod_output_ssb = self.demod_output_ssb.reshape(
(self.num_of_demods, nSeg * nAv))
# create new binary demod data buffer, if size changed
buf = (nPts * nSeg * nAv) if only_transfer_fgpa else (nPts * nSeg)
if self.demod_buffer.size != buf:
self.demod_buffer = np.zeros(buf, dtype=np.int16)
# only initiate diagnostic traces if in use
if not self.getValue('Hide I/Q'):
self.demod_output_vector_I = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_I = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_Q = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_Q = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_ref = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.demod_output_ref = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_SSB = np.zeros(
self.num_of_demods, dtype='complex')
self.demod_output_vector_NP = np.zeros(
[self.num_of_demods, nSeg])
self.demod_output_NP = np.zeros(self.num_of_demods)
self.moment_I2 = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
self.moment_Q2 = np.zeros(
[self.num_of_demods, nSeg], dtype='complex')
# configure trigger for all active channels
for nCh in lCh:
self.lTrace[nCh] = np.zeros((nSeg * nPts))
                # channel number depends on hardware version
ch = self.getHwCh(nCh)
# extra config for trig mode
if self.getValue('Trig Mode') == 'Digital trigger':
extSource = int(
self.getCmdStringFromValue('External Trig Source'))
trigBehavior = int(
self.getCmdStringFromValue('External Trig Config'))
sync = int(self.getCmdStringFromValue('Trig Sync Mode'))
self.dig.DAQtriggerExternalConfig(
ch, extSource, trigBehavior, sync)
self.dig.DAQdigitalTriggerConfig(
ch, extSource, trigBehavior)
elif self.getValue('Trig Mode') == 'Analog channel':
digitalTriggerMode = 0
digitalTriggerSource = 0
trigCh = self.getValueIndex('Analog Trig Channel')
analogTriggerMask = 2**trigCh
#analogTriggerMask = int('1111',2)
self.dig.DAQdigitalTriggerConfig(
ch, digitalTriggerSource, digitalTriggerMode)
self.dig.DAQanalogTriggerConfig(
ch, analogTriggerMask)
# config daq and trig mode
trigMode = int(self.getCmdStringFromValue('Trig Mode'))
self.dig.DAQconfig(ch, nPts, nSeg * nAv, nTrigDelay, trigMode) # TODO change nPts
# start acquiring data
self.dig.DAQstartMultiple(iChMask)
#self.wait(1)
# lT.append('Start %.1f ms' % (1000*(time.clock()-t0)))
#
# return if not measure
if not bMeasure:
return
# define number of cycles to read at a time
nCycleTotal = nSeg * nAv
nCall = int(np.ceil(nCycleTotal / nCyclePerCall))
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
# keep track of progress in percent
old_percent = -1
# self.log('nCall:' + str(nCall), level = 30)
# proceed depending on segment or not segment
        if only_transfer_fpga:
# just transfer fpga data, do conversion after to allow fast stream
ch = self.getHwCh(3)
count = 0
for n in range(nCall):
# number of cycles for this call, could be fewer for last call
nCycle = min(nCyclePerCall, nCycleTotal - (n * nCyclePerCall))
                # channel number depends on hardware version
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall)) # TODO change nPts
# stop if no data
if data.size == 0:
return
# store data in long vector, convert later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# report progress, only report integer percent
if nCall >= 1:
new_percent = int(100 * n / nCall)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# finally, get demod values
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
elif nSeg <= 1:
            # non-segmented acquisition
for n in range(nCall):
# number of cycles for this call, could be fewer for last call
nCycle = min(nCyclePerCall, nCycleTotal - (n * nCyclePerCall))
# self.log('nCycle:' + str(nCycle), level = 30)
# capture traces one by one
for nCh in lCh:
                    # channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall))
# stop if no data
if data.size == 0:
return
# different operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# average
data = data.reshape((nCycle, nPts)).mean(0)
# adjust scaling to account for summing averages
scale = lScale[nCh] * (nCycle / nAv)
# convert to voltage, add to total average
self.lTrace[nCh] += data * scale
else:
# for demod, immediately get demodulated values
self.getDemodValues(data, nPts, nSeg, nCycle)
# report progress, only report integer percent
if nCall >= 1:
new_percent = int(100 * n / nCall)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# lT.append('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
else:
# segmented acquisition, get calls per segment
(nCallSeg, extra_call) = divmod(nSeg, nCyclePerCall)
# pre-calculate list of cycles/call, last call may have more cycles
if nCallSeg == 0:
nCallSeg = 1
lCyclesSeg = [nSeg]
else:
lCyclesSeg = [nCyclePerCall] * nCallSeg
lCyclesSeg[-1] = nCyclePerCall + extra_call
# pre-calculate scale, should include scaling for averaging
lScale = np.array(lScale, dtype=float) / nAv
for n in range(nAv):
count = 0
# loop over number of calls per segment
for m, nCycle in enumerate(lCyclesSeg):
# capture traces one by one
for nCh in lCh:
                        # channel number depends on hardware version
ch = self.getHwCh(nCh)
data = self.DAQread(self.dig, ch, nPts * nCycle,
int(3000 + self.timeout_ms / nCall))
# stop if no data
if data.size == 0:
return
# different operation for signals vs demod data
if self.fpga_config == 'Only signals' or nCh < 3:
# standard operation, store data in one long vector
self.lTrace[nCh][count:(count + data.size)] += \
data * lScale[nCh]
else:
# store raw demod data, will be extracted later
self.demod_buffer[count:(count + data.size)] = data
count += data.size
# after one full set of records, convert demod data
if self.fpga_config != 'Only signals':
self.getDemodValues(self.demod_buffer, nPts, nSeg, nSeg)
# report progress, only report integer percent
if nAv >= 1:
new_percent = int(100 * n / nAv)
if new_percent > old_percent:
old_percent = new_percent
self.reportStatus(
'Acquiring traces ({}%)'.format(new_percent) +
', FPGA Demod buffer: ' +
', '.join(self.smsb_info_str))
# break if stopped from outside
if self.isStopped():
break
# at the end, convert binary data to I/Q values
if self.fpga_config != 'Only signals':
self.demod_output_ssb = self.demod_output_ssb.reshape(
(self.num_of_demods, nAv, nSeg))
# lT.append('N: %d, Tot %.1f ms' % (n, 1000 * (time.clock() - t0)))
# # log timing info
# self.log(': '.join(lT))
def getRange(self, ch):
"""Get channel range, as voltage. Index start at 0"""
rang = float(self.getCmdStringFromValue('Ch%d - Range' % (ch + 1)))
# range depends on impedance
if self.getValue('Ch%d - Impedance' % (ch + 1)) == 'High':
rang = rang * 2
        # special case if range is 0.25, 0.5 or 1: scale to 0.2, 0.4, 0.8
if rang < 1.1:
rang *= 0.8
return rang
def DAQread(self, dig, nDAQ, nPoints, timeOut):
"""Read data diretly to numpy array"""
if dig._SD_Object__handle > 0:
if nPoints > 0:
data = (keysightSD1.c_short * nPoints)()
nPointsOut = dig._SD_Object__core_dll.SD_AIN_DAQread(
dig._SD_Object__handle, nDAQ, data, nPoints, timeOut)
if nPointsOut > 0:
return np.frombuffer(data, dtype=np.int16, count=nPoints)
else:
return np.array([], dtype=np.int16)
else:
return keysightSD1.SD_Error.INVALID_VALUE
else:
return keysightSD1.SD_Error.MODULE_NOT_OPENED
def getDemodValues(self, demod_raw, nPts, nSeg, nCycle):
"""get Demod IQ data from Ch1/2/3 Trace"""
accum_length = self.getValue('Integration time')
lScale = [(self.getRange(ch) / self.bitRange) for ch in range(self.nCh)]
self.smsb_info_str = []
nDemods = self.num_of_demods
use_phase_ref = self.getValue('Use phase reference signal')
for n in range(nDemods):
y1_lsb = demod_raw[((n * 15) + 0)::nPts]
y1_msb = demod_raw[((n * 15) + 1)::nPts]
x1_lsb = demod_raw[((n * 15) + 2)::nPts]
x1_msb = demod_raw[((n * 15) + 3)::nPts]
y1x1_smsb = demod_raw[((n * 15) + 4)::nPts]
x1_smsb = y1x1_smsb.astype('int8')
y1_smsb = y1x1_smsb.astype('int16') >> 8
y2_lsb = demod_raw[((n * 15) + 5)::nPts]
y2_msb = demod_raw[((n * 15) + 6)::nPts]
x2_lsb = demod_raw[((n * 15) + 7)::nPts]
x2_msb = demod_raw[((n * 15) + 8)::nPts]
y2x2_smsb = demod_raw[((n * 15) + 9)::nPts]
x2_smsb = y2x2_smsb.astype('int8')
y2_smsb = y2x2_smsb.astype('int16') >> 8
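            # The FPGA streams each accumulator as three 16-bit words: a low
            # word (bits 0-15), a mid word (bits 16-31) and a sign-carrying
            # "smsb" byte (bits 32-39); below they are reassembled into
            # signed integers as value = lsb + msb*2**16 + smsb*2**32.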
y1_int64 = (
y1_lsb.astype('uint16') + y1_msb.astype('uint16') * (2 ** 16) +
y1_smsb.astype('int8') * (2**32))
x1_int64 = (
x1_lsb.astype('uint16') + x1_msb.astype('uint16') * (2 ** 16) +
x1_smsb.astype('int8') * (2**32))
y2_int64 = (
y2_lsb.astype('uint16') + y2_msb.astype('uint16') * (2 ** 16) +
y2_smsb.astype('int8') * (2**32))
x2_int64 = (
x2_lsb.astype('uint16') + x2_msb.astype('uint16') * (2 ** 16) +
x2_smsb.astype('int8') * (2**32))
smsb_info = [np.max(np.abs(x1_smsb)), np.max(np.abs(y1_smsb)),
np.max(np.abs(x2_smsb)), np.max(np.abs(y2_smsb))]
smsb_temp_info_str = str(int(max(smsb_info) / 1.24)) + '%'
self.smsb_info_str.append(smsb_temp_info_str)
warning_thr = 124 # warning indication that overflow can occur
            if np.any(np.array(smsb_info) > warning_thr):
warning_str = (
'Warning! overflow may occur in FPGA demod block: %d, %s' %
(n, str(smsb_info)))
self.log(warning_str, level=30)
demod_temp_I = (
(x1_int64.astype('int64') + 1j * y1_int64.astype('int64')) /
2**43 / accum_length * lScale[0])
demod_temp_Q = (
(x2_int64.astype('int64') + 1j * y2_int64.astype('int64')) /
2**43 / accum_length * lScale[1])
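            # Normalize the fixed-point accumulators to volts: 2**43 is
            # assumed to be the accumulator full-scale factor of this FPGA
            # build, dividing by the integration time turns the sum into an
            # average, and lScale maps ADC codes to the channel range.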
# store final values in large array, get indices for current call
k = self.demod_counter
n_values = demod_temp_I.size
if self.getValue('LO freq %d' % (n + 1)) <= 0:
self.demod_output_ssb[n, k:(k + n_values)] = 0.5 * (
np.real(demod_temp_I) + np.imag(demod_temp_Q) -
                    1j * (np.imag(demod_temp_I) - np.real(demod_temp_Q)))
import unittest
from unittest.mock import MagicMock, patch
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal
from pathlib import Path
from txrm2tiff.txrm_to_image import TxrmToImage, save_colour
from txrm2tiff import annotator
test_data_path = Path("/dls/science/groups/das/ExampleData/B24_test_data/")
test_file = test_data_path / "annotation_test" / "Xray_mosaic_F3C.xrm"
visit_path = test_data_path / "data" / "2019" / "cm98765-1"
raw_path = visit_path / "raw"
xm10_path = raw_path / "XMv10"
xm13_path = raw_path / "XMv13"
test_files = [
(test_file, ),
(xm13_path / 'Xray_mosaic_v13.xrm', ),
(xm13_path / 'Xray_mosaic_v13_interrupt.xrm', ),
(xm13_path / 'Xray_mosaic_7x7_v13.xrm', ),
(xm13_path / 'Xray_single_v13.xrm', ),
(xm13_path / 'tomo_v13_full.txrm', ),
(xm13_path / 'tomo_v13_full_noref.txrm', ),
(xm13_path / 'tomo_v13_interrupt.txrm', ),
(xm13_path / 'VLM_mosaic_v13.xrm', ),
(xm13_path / 'VLM_mosaic_v13_interrupt.xrm', ),
(xm13_path / 'VLM_grid_mosaic_large_v13.xrm', ),
(xm10_path / '12_Tomo_F4D_Area1_noref.txrm', ),
(xm10_path / 'VLM_mosaic.xrm', ),
(xm10_path / 'test_tomo2_e3C_full.txrm', ),
(xm10_path / 'Xray_mosaic_F5A.xrm', ),]
class TestAnnotator(unittest.TestCase):
@parameterized.expand(test_files)
@unittest.skipUnless(visit_path.exists(), "dls paths cannot be accessed")
def test_with_real_image(self, test_file):
output_file = test_file.parent / (test_file.stem + "_Annotated.tif")
converter = TxrmToImage()
converter.convert(test_file, custom_reference=None, ignore_reference=False, annotate=True)
ann_images = converter.get_annotated_images()
self.assertFalse(ann_images is None, msg="Image wasn't created")
save_colour(output_file, ann_images)
self.assertTrue(output_file.exists(), msg=f"File {output_file} doesn't exist")
output_file.unlink()
@patch('txrm2tiff.annotator.txrm_wrapper.read_stream')
def test_square(self, mocked_stream_reader):
fill = 125
im_size = 5
im = np.zeros((1, im_size, im_size))
x0, y0 = 1, 1
x1, y1 = 4, 4
ann = annotator.Annotator(im[0].shape[::-1])
with patch.object(ann, "_get_colour") as patched_colour:
with patch.object(ann, "_get_thickness") as patched_thickness:
patched_colour.return_value = (0, fill, 0, 255)
patched_thickness.return_value = 1
mocked_stream_reader.side_effect = [[x0], [y0], [x1], [y1]]
ole = MagicMock()
stream_stem = ""
ann._plot_rect(ole, stream_stem)
ann._saved_annotations = True
output = ann.apply_annotations(im)
expected_output = np.zeros((im_size, im_size, 3), dtype=np.uint8)
y0, y1 = im_size - 1 - y0, im_size - 1 - y1 # Invert y axis
expected_output[y0, x0:x1 + 1, 1] = fill
expected_output[y1:y0 + 1, x0, 1] = fill
expected_output[y1, x0:x1 + 1, 1] = fill
expected_output[y1:y0 + 1, x1, 1] = fill
assert_array_equal(output[0], expected_output)
@patch('txrm2tiff.annotator.txrm_wrapper.read_stream')
def test_line(self, mocked_stream_reader):
fill = 125
x0, x1 = 1, 4
y = 4
im_size = 5
im = np.zeros((1, im_size, im_size))
ann = annotator.Annotator(im[0].shape[::-1])
with patch.object(ann, "_get_colour") as patched_colour:
with patch.object(ann, "_get_thickness") as patched_thickness:
patched_colour.return_value = (0, fill, 0, 255)
patched_thickness.return_value = 1
mocked_stream_reader.side_effect = [[x0], [x1], [y], [y]]
ole = MagicMock()
stream_stem = ""
ann._plot_line(ole, stream_stem)
ann._saved_annotations = True
output = ann.apply_annotations(im)
expected_output = np.zeros((im_size, im_size, 3), dtype=np.uint8)
y = im_size - 1 - y # Invert y axis
expected_output[y, x0:x1 + 1, 1] = fill
assert_array_equal(output[0], expected_output)
@patch('txrm2tiff.annotator.txrm_wrapper.read_stream')
def test_ellipse(self, mocked_stream_reader):
fill = 125
xs = 0, 18
ys = 2, 16
x_mid = int(round(np.mean(xs)))
        y_mid = int(round(np.mean(ys)))
# -*- coding:utf-8 -*-
"""
@version: 1.0
@author: kevin
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm Community Edition
@file: adios_train.py
@time: 17/05/03 17:39
"""
import json
import os
import time
from math import ceil
import numpy as np
import tensorflow as tf
import yaml
from keras import backend as K
from sklearn import linear_model as lm
from utils.data_helper import build_data_cv
from utils.hiso import HISO
from utils.metrics import (Average_precision, Coverage, Hamming_loss,
One_error, Ranking_loss, Construct_thresholds)
K.set_learning_phase(1)
def do_eval(sess, model, thres_model, eval_data, batch_size):
'''
    Evaluate test data for the model.
:param sess:
:param model:
:param eval_data:
:param batch_size:
:return:
'''
K.set_learning_phase(0)
number_of_data = len(eval_data)
number_of_batch = ceil(number_of_data / batch_size)
Y0_labels, Y1_labels, Y0_probs, Y1_probs = [], [], [], []
eval_loss, eval_cnt = 0., 0.
for batch in range(number_of_batch):
start = batch_size * batch
end = start + min(batch_size, number_of_data - start)
eval_Y0_labels = [hml.top_label for hml in eval_data[start:end]]
eval_Y1_labels = [hml.bottom_label for hml in eval_data[start:end]]
curr_loss, eval_Y0_probs, eval_Y1_probs = sess.run(
[model.loss, model.Y0_probs, model.Y1_probs],
feed_dict={
model.wds: [hml.wds for hml in eval_data[start:end]],
model.pos: [hml.pos for hml in eval_data[start:end]],
model.Y0: eval_Y0_labels,
model.Y1: eval_Y1_labels
# K.learning_phase(): 0
})
eval_loss += curr_loss
eval_cnt += 1
Y0_labels.extend(eval_Y0_labels)
Y1_labels.extend(eval_Y1_labels)
Y0_probs.extend(eval_Y0_probs)
Y1_probs.extend(eval_Y1_probs)
# evaluation metrics
Y0_labels = np.array(Y0_labels)
Y1_labels = np.array(Y1_labels)
Y0_probs = np.array(Y0_probs)
Y1_probs = np.array(Y1_probs)
print('\n')
print('Y0 label:', Y0_labels[3])
print('Y0 probs:', Y0_probs[3])
print('Y1 probs:', Y1_probs[3])
print('Y1 label:', Y1_labels[3])
print('\n')
# probs to predict label over thresholds
# fit_threshold automatally
Y0_preds = Y0_probs >= 0.7
# T1 = thres_model.predict(Y1_probs)
Y1_preds = Y1_probs >= 0.3
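    # NOTE: 0.7 / 0.3 above are fixed cut-offs; the fitted threshold model
    # (thres_model, built with Construct_thresholds) could be used instead,
    # as the commented-out T1 line suggests.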
loss_dict = {'eval_loss': eval_loss / eval_cnt, 'Y0': {}, 'Y1': {}}
# use eval
func_eval = [
'Hamming_loss', 'One_error', 'Ranking_loss', 'Coverage',
'Average_precision'
]
    # 0: Weige's evaluation criterion, 1: the correct evaluation criterion
mode = 1
for func in func_eval:
if func == 'Hamming_loss':
loss_dict['Y0'][func] = eval(func)(Y0_labels, Y0_preds, mode=mode)
loss_dict['Y1'][func] = eval(func)(Y1_labels, Y1_preds, mode=mode)
else:
loss_dict['Y0'][func] = eval(func)(Y0_labels, Y0_probs, mode=mode)
loss_dict['Y1'][func] = eval(func)(Y1_labels, Y1_probs, mode=mode)
K.set_learning_phase(1)
return loss_dict
def train(params):
'''
    Entry point for training the model.
    :param params: model parameter dict
:return:
'''
datas, voc, pos, max_length = build_data_cv(
file_path='../docs/data/HML_JD_ALL.new.dat',
voc_path='../docs/data/voc.json',
pos_path='../docs/data/pos.json',
cv=5)
# fill params
params['voc_size'] = len(voc)
params['pos_size'] = len(pos)
params['words']['dim'] = max_length
params['pos']['dim'] = max_length
print(json.dumps(params, indent=4))
# split test and train
test_datas = list(filter(lambda data: data.cv_n == 1, datas))
train_datas = list(filter(lambda data: data.cv_n != 1, datas))
print('train dataset: {}'.format(len(train_datas)))
print('test dataset: {}'.format(len(test_datas)))
print('max length: {}'.format(max_length))
# build model
timestamp = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
log_test_dir = '../docs/test/%s' % timestamp
log_train_dir = '../docs/train/%s' % timestamp
os.mkdir(log_test_dir)
os.mkdir(log_train_dir)
# log_baseline_dir = '../docs/baseline/'
# os.mkdir(log_baseline_dir)
loss_key = [
'Hamming_loss', 'One_error', 'Ranking_loss', 'Coverage',
'Average_precision'
]
    # set GPU resource limits
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = params['gpu_fraction']
number_of_training_data = len(train_datas)
batch_size = params['batch_size']
number_of_batch = int(ceil(number_of_training_data / batch_size))
    # save the best model
model_dir = params['model_dir'] + time.strftime("%Y-%m-%d-%H:%M:%S",
time.localtime())
os.mkdir(model_dir)
model_name = model_dir + '/' + params['model_name']
with tf.Session(config=config) as sess, tf.device('/gpu:1'):
hiso = HISO(params)
saver = tf.train.Saver(max_to_keep=4)
# baseline_writer = tf.summary.FileWriter(log_baseline_dir)
test_writer = tf.summary.FileWriter(log_test_dir)
train_writer = tf.summary.FileWriter(log_train_dir, sess.graph)
init_op = tf.global_variables_initializer()
sess.run(init_op)
step = 0
min_hamming_loss = 10
best_sess = sess
rig_labels, rig_probs = [], []
for epoch in range(params['epoch']):
# shuffle in each epoch
            train_datas = np.random.permutation(train_datas)
# -*- coding: utf-8 -*-
"""
Created on Tues at some point in time
@author: bokorn
"""
import os
import numpy as np
import scipy.io as sio
from se3_distributions.eval.pose_error import *
from quat_math import quaternion_matrix
default_extend_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../datasets/ycb_extents.txt')
def getYCBThresholds(extent_file = default_extend_file):
assert os.path.exists(extent_file), \
'Path does not exist: {}'.format(extent_file)
num_classes = 22
extents = np.zeros((num_classes, 3), dtype=np.float32)
    extents[1:, :] = np.loadtxt(extent_file)
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 19:21:10 2020
@author: Nicolai
"""
from IKernelBase import IKernelBase
import numpy as np
class KernelGSin(IKernelBase):
"""
Implementation of abstract class IKernelBase where GSin Kernels are used.
.. math::
w e^{-y ((x_0 - c_0)^2 + (x_1 - c_1)^2)} sin(f ((x_0 - c_0)^2 + (x_1 - c_1)^2)-p)
    Attributes
    ----------
_kernel_type: string
descriptive string of the kernel formula
_kernel_size: int
number of parameters to be optimised in one kernel
Methods
-------
kernel_type(): string
getter for the attribute kernel_type
kernel_size(): int
        getter of the attribute kernel_size
solution(kernels, x): float
        linear combination of kernels specified in argument, evaluated at x
    solution_x0(kernels, x): float
        derivative with respect to x0 of linear combination of kernels specified in argument, evaluated at x
    solution_x1(kernels, x): float
        derivative with respect to x1 of linear combination of kernels specified in argument, evaluated at x
    solution_x0_x0(kernels, x): float
        second derivative with respect to x0 of linear combination of kernels specified in argument, evaluated at x
    solution_x1_x1(kernels, x): float
        second derivative with respect to x1 of linear combination of kernels specified in argument, evaluated at x
    solution_x0_x1(kernels, x): float
        second derivative with respect to x0, x1 of linear combination of kernels specified in argument, evaluated at x
"""
def __init__(self):
self._kernel_type = "GSin Kernel: sum_{i}^{N}(w_i*e^(-y_i*((x_0 - c_0_i)^2 + (x_1 - c_1_i)^2)))sin(f_i*((x_0 - c_0_i)^2 + (x_1 - c_1_i)^2)-p_i)"
self._kernel_size = 6
@property
def kernel_type(self):
return self._kernel_type
@property
def kernel_size(self):
return self._kernel_size
def solution(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
for i in range(kernelNR):
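            # Gaussian-damped sinusoid: w * exp(-y*r) * sin(f*r - p), where
            # r is the squared distance of x from the kernel centre (c0, c1)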
r = (x[0] - kernels[i][2])**2 +(x[1] - kernels[i][3])**2
exp = -1*kernels[i][1]*(r)
s = np.sin(kernels[i][4]*r - kernels[i][5])
result += kernels[i][0] * np.e**exp * s
return result
def solution_x0(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
for i in range(kernelNR):
w = kernels[i][0]
g = kernels[i][1]
c0= kernels[i][2]
c1= kernels[i][3]
f = kernels[i][4]
p = kernels[i][5]
r2= (x[0] - c0)**2 + (x[1] - c1)**2
exp=(x[0] - c0)*np.e**(-g*(r2))
result += 2*w*g*exp*np.sin(p - f*(r2)) + 2*w*f*exp*np.cos(p - f*(r2))
return result
def solution_x1(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
for i in range(kernelNR):
w = kernels[i][0]
g = kernels[i][1]
c0= kernels[i][2]
c1= kernels[i][3]
f = kernels[i][4]
p = kernels[i][5]
r2= (x[0] - c0)**2 + (x[1] - c1)**2
exp=(x[1] - c1)*np.e**(-g*(r2))
result += 2*w*g*exp*np.sin(p - f*(r2)) + 2*w*f*exp*np.cos(p - f*(r2))
return result
def solution_x0_x1(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
y = x[1]
x = x[0]
for i in range(kernelNR):
w = kernels[i][0]
g = kernels[i][1]
c0= kernels[i][2]
c1= kernels[i][3]
f = kernels[i][4]
p = kernels[i][5]
r2= (c0 - x)**2 + (c1 - y)**2
result += 4*w*(c0 - x)*(c1 - y)*np.e**(-g*(r2))*((f**2 - g**2)* \
np.sin(p - f*(r2)) - 2*f*g*np.cos(p - f*(r2)))
return result
def solution_x0_x0(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
y = x[1]
x = x[0]
for i in range(kernelNR):
w = kernels[i][0]
g = kernels[i][1]
c0= kernels[i][2]
c1= kernels[i][3]
f = kernels[i][4]
p = kernels[i][5]
r2= (c0 - x)**2 + (c1 - y)**2
result += 2*w*np.e**(-g*(r2))*((2*c0**2*(f**2 - g**2) + 4*c0*x*(g**2 - f**2) \
+ 2*f**2*x**2 - 2*g**2*x**2 + g)*np.sin(p - f*(r2)) + \
f*(-4*c0**2*g + 8*c0*g*x - 4*g*x**2 + 1)*np.cos(p - f*(r2)))
return result
def solution_x1_x1(self, kernels, x):
kernelNR, dim = kernels.shape
result = 0.0
y = x[1]
x = x[0]
for i in range(kernelNR):
w = kernels[i][0]
g = kernels[i][1]
c0= kernels[i][2]
c1= kernels[i][3]
f = kernels[i][4]
p = kernels[i][5]
r2= (c1 - y)**2 + (c0 - x)**2
result += 2*w*np.e**(-g*(r2))*((2*c1**2*(f**2 - g**2) + 4*c1*y*(g**2 - f**2) \
+ 2*f**2*y**2 - 2*g**2*y**2 + g)*np.sin(p - f*(r2)) + \
f*(-4*c1**2*g + 8*c1*g*y - 4*g*y**2 + 1)*np.cos(p - f*(r2)))
return result
if __name__ == "__main__":
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
print("start test")
kgs = KernelGSin()
print(kgs.kernel_type)
candidate_1 = np.array([1,1,0,0,1,0])
candidate_1_reshaped = candidate_1.reshape((1,6))
# show solution
print("show solution")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-3, 3.1, 0.005)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.savefig("gsk.pdf")
plt.show()
# show derivative with respect to x0
print("show derivative to x0")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution_x0(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
# show derivative with respect to x1
print("show derivative to x1")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution_x1(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
# show derivative with respect to x0 x0
print("show derivative to x0 x0")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution_x0_x0(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
print("show derivative to x0 x1")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution_x0_x1(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
# show derivative with respect to x0 x1
print("show derivative to x1 x1")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution_x1_x1(candidate_1_reshaped, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
print("overflow test")
candidate_o = np.array([[69.13155327, -59.50487635, 63.13495401, 72.31468988, 24.31468981, 4.8112859],
[12.9604027 , -76.7379638 , 55.64266812, 91.56222343, 14.31468982, 8.4634546],
[83.12853572, -60.83721539, 3.36485524, 51.36506458, 65.31468983, 2.4894392],
[79.46589204, -16.83238165, 13.40452466, 78.59279995, 34.31628983, 5.8237846],
[80.61433144, -45.23737621, 9.77667237, 93.48153471, 31.31864989, 3.4890687]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
X, Y = np.meshgrid(x, y)
zs0 = np.array([kgs.solution(candidate_o, \
np.array([x,y])) for x,y in zip(np.ravel(X), np.ravel(Y))])
print("max(zs0) = {}".format(max(zs0)))
print("min(zs0) = {}".format(min(zs0)))
Z = zs0.reshape(X.shape)
ax.plot_surface(X, Y, Z, cmap=cm.gnuplot)
ax.set_xlabel("X0")
ax.set_ylabel("X1")
ax.set_zlabel("f(X0,X1)")
plt.show()
print("underflow test")
candidate_u = np.array([[69.13155327, 59.50487635, 63.13495401, 72.31468988, 24.31468981, 4.8112859],
[12.9604027 , 76.7379638 , 55.64266812, 91.56222343, 14.31468982, 8.4634546],
[83.12853572, 60.83721539, 3.36485524, 51.36506458, 65.31468983, 2.4894392],
[79.46589204, 16.83238165, 13.40452466, 78.59279995, 34.31628983, 5.8237846],
[80.61433144, 45.23737621, 9.77667237, 93.48153471, 31.31864989, 3.4890687]])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x = y = np.arange(-5, 5.1, 0.1)
    X, Y = np.meshgrid(x, y)
'''
auralib module for AVO functions and related calculations
Written by: <NAME>
Created: Approx. 2010
Last Mod: April 28, 2011
'''
import numpy as np
def ray_param(v, theta):
'''
Returns the ray parameter p
Usage:
p = ray_param(v, theta)
Inputs:
v = interval velocity
theta = incidence angle of ray (degrees)
Output:
p = ray parameter (i.e. sin(theta)/v )
'''
p = np.sin( np.deg2rad(theta) ) / v # ray parameter calculation
return p
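# Minimal usage sketch (hypothetical values): the ray parameter of a P-wave
# travelling at 3000 m/s with 30 degrees incidence is
#   ray_param(3000.0, 30.0)  # = sin(30 deg)/3000 ~ 1.667e-4 s/m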
def Rpp_bortfeld(theta1, vp_in, vs_in, rho_in):
"""
Calculate angle dependent p-wave to p-wave reflection coefficients using
Bortfeld's approximation to Zoeppritz P-P reflectivity.
Reference: Rock Physics Handbook, Mavko et al.
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
theta1 = np.deg2rad(theta1)
p = np.sin(theta1)/vp1
theta2 = np.arcsin(vp2*p)
phi1 = np.arcsin(vs1*p)
phi2 = np.arcsin(vs2*p)
a = 1/2*np.log((vp2*rho2*np.cos(theta1))/(vp1*rho1*np.cos(theta2)))
b = (np.sin(theta1)/vp1)**2 * (vs1**2-vs2**2)
c = 2 + np.log(rho2/rho1)/np.log(vs2/vs1)
Rpp_bort = a + b*c
return Rpp_bort
def Rpp_akirichards(theta_in, vp_in, vs_in, rho_in, theta_mode='average'):
"""
Calculate angle dependent p-wave to p-wave reflection coefficients using
Aki & Richards approximation to Zoeppritz P-P reflectivity.
Reference: Quantitative Seismology, Aki & Richards
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
theta_r = (theta_ir + theta_tr)/2
if theta_mode == 'average':
a = 1/2 * (1 - 4*(vs/vp)**2*np.sin(theta_r)**2)
b = 1/(2*np.cos(theta_r)**2)
c = -4*(vs/vp)**2*np.sin(theta_r)**2
elif theta_mode == 'incident':
a = 1/2 * (1 - 4*(vs/vp)**2*np.sin(theta_ir)**2)
b = 1/(2*np.cos(theta_ir)**2)
c = -4*(vs/vp)**2*np.sin(theta_ir)**2
Rpp_ar = a*drho/rho + b*dvp/vp + c*dvs/vs
return Rpp_ar
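# Minimal usage sketch (hypothetical two-layer model, angles in degrees):
#   vp, vs, rho = [3000., 3200.], [1500., 1600.], [2.3, 2.4]
#   Rpp_akirichards(np.arange(0., 31., 5.), vp, vs, rho)
# returns the P-P reflection coefficient at 0, 5, ..., 30 degrees incidence.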
def Rps_akirichards(theta_in, vp_in, vs_in, rho_in, theta_mode='average'):
"""
Calculate angle dependent p-wave to s-wave reflection coefficients using
Aki & Richards approximation to Zoeppritz P-Sv reflectivity.
Reference: Quantitative Seismology, Aki & Richards
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
theta1 = np.deg2rad(theta_in)
p = np.sin(theta1)/vp1
theta2 = np.arcsin(vp2*p)
phi1 = np.arcsin(vs1*p)
phi2 = np.arcsin(vs2*p)
theta = np.arctan((theta2-theta1)/(dvp/vp))
phi = np.arctan((phi2-phi1)/(dvs/vs))
if theta_mode == 'average':
a = -p*vp/(2*np.cos(phi))
b = 1 - 2*vs**2*p**2 + 2*vs**2*(np.cos(theta)/vp)*(np.cos(phi)/vs)
c = -4*vs**2 * (p**2 - (np.cos(theta)/vp) * (np.cos(phi)/vs))
if theta_mode == 'incident':
a = -p*vp/(2*np.cos(phi1))
b = (1-2*vs**2*p**2) + 2*vs**2 * np.cos(theta1)/vp * np.cos(phi1)/vs
        c = -4*vs**2 * (p**2 - (np.cos(theta1)/vp) * (np.cos(phi1)/vs))
Rps_ar = a*(b*drho/rho + c*dvs/vs)
return Rps_ar
def Rpp_gelfandlarner(theta_in, vp_in, vs_in, rho_in):
"""
Function to calculate P-P reflectivity using Gelfand and Larner's
form of Aki-Richards P-P reflectivity approximation.
Reference: AVO Theory doc, Hampson-Russell Software Services Ltd.
http://www.ipt.ntnu.no/pyrex/stash/avo_theory.pdf
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
theta_r = (theta_ir + theta_tr)/2
Rp = 1/2*(dvp/vp + drho/rho)
Rs = 1/2*(dvs/vs + drho/rho)
Rpp_gl = Rp + (Rp-2*Rs)*np.sin(theta_r)**2
return Rpp_gl
def Rpp_hilterman(theta_in, vp_in, vs_in, rho_in):
"""
Function for calculating P-P reflectivity using Hilterman's approximation
to Zoeppritz P-P reflectivity.
Reference:
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
    prat1 = ((vp1/vs1)**2 - 2)/(2*((vp1/vs1)**2 - 1))
    prat2 = ((vp2/vs2)**2 - 2)/(2*((vp2/vs2)**2 - 1))
dprat = prat2 - prat1
prat = (prat1 + prat2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
theta_r = (theta_ir + theta_tr)/2
Rp = 1/2*(dvp/vp + drho/rho)
Rpp_hilt = Rp + 9/4*dprat*np.sin(theta_r)**2
return Rpp_hilt
def Rpp_wiggins(vp1, vs1, rho1, vp2, vs2, rho2, theta1, terms=3):
'''
Wiggins' approximation to Zoeppritz PP reflectivity.
    theta1 in degrees
    '''
    # Calculate some constants...
    P = ray_param(vp1, theta1)  # ray_param converts degrees to radians internally
    theta1 = np.deg2rad(theta1)  # work in radians from here on
    theta2 = np.arcsin( vp2*P )
    theta = (theta1+theta2)/2.0
dRho = rho2-rho1
dVp = vp2-vp1
dVs = vs2-vs1
Rho = (rho1+rho2)/2.0
Vp = (vp1+vp2)/2.0
Vs = (vs1+vs2)/2.0
K = Vs/Vp
a = 1.0
b = np.sin(theta)**2.0
c = np.sin(theta)**2.0 * np.tan(theta)**2.0
Rp0 = 0.5*(dVp/Vp + dRho/Rho)
G = 0.5*dVp/Vp - 4.0*K**2.0*dVs/Vs - 2.0*K**2.0*dRho/Rho
C = 0.5*dVp/Vp
if terms == 2:
Rpp = a*Rp0 + b*G
elif terms == 3:
Rpp = a*Rp0 + b*G + c*C
return Rpp
def Rpp_smithgidlow(theta_in, vp_in, vs_in, rho_in, theta_mode='incident'):
"""
Function for calculating P-P reflectivity using Smith-Gidlow P-P reflectivity
approximation.
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
Ip1 = vp1*rho1
Ip2 = vp2*rho2
dIp = Ip2 - Ip1
Ip = (Ip1 + Ip2)/2
    Is1 = vs1*rho1
Is2 = vs2*rho2
dIs = Is2 - Is1
Is = (Is1 + Is2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
if theta_mode == 'average':
theta_r = (theta_ir + theta_tr)/2
elif theta_mode == 'incident':
theta_r = theta_ir
k = (vs/vp)**2
A = (5/8 - 0.5*k*np.sin(theta_r)**2 + 0.5*np.tan(theta_r)**2)
B = (-4*k*np.sin(theta_r)**2)
Rpp_sg = A*dvp/vp + B*dvs/vs
return Rpp_sg
def Rpp_fatti(theta_in, vp_in, vs_in, rho_in, num_terms=3, theta_mode='incident'):
"""
Function for calculating P-P reflectivity using Fatti's P-P reflectivity
approximation.
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
Ip1 = vp1*rho1
Ip2 = vp2*rho2
dIp = Ip2 - Ip1
Ip = (Ip1 + Ip2)/2
    Is1 = vs1*rho1
Is2 = vs2*rho2
dIs = Is2 - Is1
Is = (Is1 + Is2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
if theta_mode == 'average':
theta_r = (theta_ir + theta_tr)/2
elif theta_mode == 'incident':
theta_r = theta_ir
k = (vs/vp)**2
term1 = 0.5*dIp/Ip*(1 - np.tan(theta_r)**2)
term2 = -4*k*dIs/Is*np.sin(theta_r)**2
term3 = -(0.5*drho/rho*np.tan(theta_r)**2 - 2*k*drho/rho*np.sin(theta_r)**2)
if num_terms == 3:
Rpp_fat = term1 + term2 + term3
elif num_terms == 2:
Rpp_fat = term1 + term2
return Rpp_fat
def Rpp_shuey(theta_in, vp_in, vs_in, rho_in, num_terms=2, theta_mode='average'):
"""
Calculate angle dependent p-wave to p-wave reflection coefficients using
Shuey's rearrangement of Aki & Richards approximation of .
Reference: Shuey, 1985 (Geophysics)
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
dvp = vp2 - vp1
vp = (vp1 + vp2)/2
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
dvs = vs2 - vs1
vs = (vs1 + vs2)/2
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
drho = rho2 - rho1
rho = (rho1 + rho2)/2
prat1 = (vp1**2-2*vs1**2)/(2*(vp1**2-vs1**2))
prat2 = (vp2**2-2*vs2**2)/(2*(vp2**2-vs2**2))
dprat = prat2 - prat1
prat = (prat1 + prat2)/2
theta_i = np.array(theta_in)
theta_ir = np.deg2rad(theta_i)
theta_tr = np.arcsin(vp2/vp1*np.sin(theta_ir))
if theta_mode == 'average':
theta_r = (theta_ir + theta_tr)/2
elif theta_mode == 'incident':
theta_r = theta_ir
Rp = 1/2*(dvp/vp + drho/rho)
B = (dvp/vp)/(dvp/vp + drho/rho)
A0 = B - 2*(1+B)*(1-2*prat)/(1-prat)
A = A0 + 1/(1-prat)**2 * dprat/Rp
    if num_terms == 3:
        Rpp_shuey = Rp + (Rp*A0 + dprat/((1 - prat)**2))*np.sin(theta_r)**2 + \
                    1/2*dvp/vp*(np.tan(theta_r)**2 - np.sin(theta_r)**2)
    elif num_terms == 2:
        Rpp_shuey = Rp + (Rp*A0 + dprat/((1 - prat)**2))*np.sin(theta_r)**2
return Rpp_shuey
def rc_zoep(vp1, vs1, rho1, vp2, vs2, rho2, theta):
'''
Reflection & Transmission coefficients calculated using full Zoeppritz
equations.
'''
vp1 = float(vp1)
vp2 = float(vp2)
vs1 = float(vs1)
vs2 = float(vs2)
rho1 = float(rho1)
rho2 = float(rho2)
theta = float(theta)
# Calculate reflection & transmission angles
theta1 = np.deg2rad(theta)
p = ray_param(vp1, theta1) # Ray parameter
theta2 = np.arcsin(p*vp2); # Transmission angle of P-wave
phi1 = np.arcsin(p*vs1); # Reflection angle of converted S-wave
phi2 = np.arcsin(p*vs2); # Transmission angle of converted S-wave
M = np.array([ \
[-np.sin(theta1), -np.cos(phi1), np.sin(theta2), np.cos(phi2)],
[np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
[2.0*rho1*vs1*np.sin(phi1)*np.cos(theta1), rho1*vs1*(1.0-2.0*np.sin(phi1)**2.0),
2.0*rho2*vs2*np.sin(phi2)*np.cos(theta2), rho2*vs2*(1.0-2.0*np.sin(phi2)**2.0)],
[-rho1*vp1*(1.0-2.0*np.sin(phi1)**2.0), rho1*vs1*np.sin(2.0*phi1),
rho2*vp2*(1.0-2.0*np.sin(phi2)**2.0), -rho2*vs2*np.sin(2.0*phi2)]
], dtype='float')
N = np.array([ \
[np.sin(theta1), np.cos(phi1), -np.sin(theta2), -np.cos(phi2)],
[np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
[2.0*rho1*vs1*np.sin(phi1)*np.cos(theta1), rho1*vs1*(1.0-2.0*np.sin(phi1)**2.0),
2.0*rho2*vs2*np.sin(phi2)*np.cos(theta2), rho2*vs2*(1.0-2.0*np.sin(phi2)**2.0)],
[rho1*vp1*(1.0-2.0*np.sin(phi1)**2.0), -rho1*vs1*np.sin(2.0*phi1),
-rho2*vp2*(1.0-2.0*np.sin(phi2)**2.0), rho2*vs2*np.sin(2.0*phi2)]
], dtype='float')
# This is the important step, calculating coefficients for all modes and rays
Rzoep = np.dot(np.linalg.inv(M), N);
return Rzoep
def Rpp_zoeppritz(theta, vp_in, vs_in, rho_in):
"""
Calculate angle dependent p-wave to p-wave reflection coefficients using
Zoeppritz P-P reflectivity equation.
Reference: Aki & Richards
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
theta1 = np.deg2rad(theta)
p = np.sin(theta1)/vp1
theta2 = np.arcsin(vp2*p)
phi1 = np.arcsin(vs1*p)
phi2 = np.arcsin(vs2*p)
a = rho2*(1-2*vs2**2*p**2) - rho1*(1-2*vs1**2*p**2)
b = rho2*(1-2*vs2**2*p**2) + 2*rho1*vs1**2*p**2
c = rho1*(1-2*vs1**2*p**2) + 2*rho2*vs2**2*p**2
d = 2*(rho2*vs2**2 - rho1*vs1**2)
E = b*np.cos(theta1)/vp1 + c*np.cos(theta2)/vp2
F = b*np.cos(phi1)/vs1 + c*np.cos(phi2)/vs2
G = a - d*np.cos(theta1)/vp1*np.cos(phi2)/vs2
H = a - d*np.cos(theta2)/vp2*np.cos(phi1)/vs1
D = E*F + G*H*p**2
Rpp_zoe = (b*np.cos(theta1)/vp1 - c*np.cos(theta2)/vp2)*F - \
(a+d*np.cos(theta1)/vp1*np.cos(phi2)/vs2)*H*p**2
Rpp_zoe = Rpp_zoe / D
return Rpp_zoe
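# Sanity-check sketch (hypothetical model): at normal incidence the exact
# Zoeppritz coefficient reduces to the acoustic impedance contrast,
#   Rpp_zoeppritz(0., vp, vs, rho) ~ (Ip2 - Ip1)/(Ip2 + Ip1), with Ip = rho*vp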
def Rps_zoeppritz(theta, vp_in, vs_in, rho_in):
"""
Calculate angle dependent p-wave to s-wave reflection coefficients using
Zoeppritz P-Sv reflectivity equation.
Reference: Quantitative Seismology, Aki & Richards
"""
vp_in = np.array(vp_in)
vs_in = np.array(vs_in)
rho_in = np.array(rho_in)
vp1 = vp_in[:-1]
vp2 = vp_in[1:]
vs1 = vs_in[:-1]
vs2 = vs_in[1:]
rho1 = rho_in[:-1]
rho2 = rho_in[1:]
theta1 = np.deg2rad(theta)
p = np.sin(theta1)/vp1
theta2 = np.arcsin(vp2*p)
phi1 = np.arcsin(vs1*p)
phi2 = np.arcsin(vs2*p)
a = rho2*(1-2*vs2**2*p**2) - rho1*(1-2*vs1**2*p**2)
b = rho2*(1-2*vs2**2*p**2) + 2*rho1*vs1**2*p**2
c = rho1*(1-2*vs1**2*p**2) + 2*rho2*vs2**2*p**2
d = 2*(rho2*vs2**2 - rho1*vs1**2)
E = b*np.cos(theta1)/vp1 + c*np.cos(theta2)/vp2
F = b*np.cos(phi1)/vs1 + c*np.cos(phi2)/vs2
G = a - d*np.cos(theta1)/vp1*np.cos(phi2)/vs2
H = a - d*np.cos(theta2)/vp2*np.cos(phi1)/vs1
D = E*F + G*H*p**2.0
Rps_zoe = -2*np.cos(theta1)/vp1*(a*b + c*d*np.cos(theta2)/vp2 * np.cos(phi2)/vs2)*p*vp1/(vs1*D)
return Rps_zoe
def calc_eei(chi, vp, vs, rho, K='auto', norm='auto'):
"""
Calculate extended elastic impedance values.
"""
if K == 'auto':
K = np.nanmean((vs/vp)**2)
if norm == 'auto':
vp0 = np.nanmean(vp)
        vs0 = np.nanmean(vs)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Polynomial and Linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class PolynomialTest(tf.test.TestCase, parameterized.TestCase):
"""Test the Polynomial kernel."""
def test_mismatched_float_types_are_bad(self):
with self.assertRaises(TypeError):
tfp.math.psd_kernels.Polynomial(
bias_variance=np.float32(1.),
slope_variance=np.float64(1.),
shift=0.,
exponent=1.)
def testFloat32Fallback(self):
# Should be OK (float32 fallback).
self.polynomial = tfp.math.psd_kernels.Polynomial(
bias_variance=0, slope_variance=1, shift=0, exponent=1)
# Should be OK.
tfp.math.psd_kernels.Polynomial(
bias_variance=np.float32(1.), slope_variance=1., shift=0., exponent=1.)
def testValidateArgsNonPositiveAreBad(self):
with self.assertRaisesOpError('Condition x > 0 did not hold'):
k = tfp.math.psd_kernels.Polynomial(bias_variance=-1., validate_args=True)
self.evaluate(k.apply([1.], [1.]))
with self.assertRaisesOpError('Condition x > 0 did not hold'):
k = tfp.math.psd_kernels.Polynomial(
slope_variance=-1., validate_args=True)
self.evaluate(k.apply([1.], [1.]))
with self.assertRaisesOpError('Condition x > 0 did not hold'):
k = tfp.math.psd_kernels.Polynomial(exponent=-1., validate_args=True)
self.evaluate(k.apply([1.], [1.]))
  def testShiftNonPositiveIsOk(self):
# No exception expected
k = tfp.math.psd_kernels.Polynomial(shift=-1., validate_args=True)
self.evaluate(k.apply([1.], [1.]))
def testValidateArgsNoneIsOk(self):
# No exception expected
k = tfp.math.psd_kernels.Polynomial(
bias_variance=None,
slope_variance=None,
shift=None,
exponent=None,
validate_args=True)
self.evaluate(k.apply([[1.]], [[1.]]))
def testNoneShapes(self):
k = tfp.math.psd_kernels.Polynomial(
bias_variance=np.reshape(np.arange(12.), [2, 3, 2]))
self.assertEqual([2, 3, 2], k.batch_shape.as_list())
@parameterized.named_parameters(
dict(
testcase_name='Shape [] kernel',
bias_variance=2.,
slope_variance=2.,
shift=2.,
exponent=2.,
shape=[]),
dict(
testcase_name='Shape [1] kernel',
bias_variance=[2.],
slope_variance=[2.],
shift=[2.],
exponent=[2.],
shape=[1]),
dict(
testcase_name='Shape [2] kernel',
bias_variance=[1., 2.],
slope_variance=[1., 2.],
shift=[1., 2.],
exponent=[1., 2.],
shape=[2]),
dict(
testcase_name='Shape [2, 1] kernel',
bias_variance=[[1.], [2.]],
slope_variance=[[1.], [2.]],
shift=[[1.], [2.]],
exponent=[[1.], [2.]],
shape=[2, 1]),
dict(
testcase_name='Shape [2, 1] broadcast kernel',
bias_variance=None,
slope_variance=2.,
shift=[2.],
exponent=[[1.], [2.]],
shape=[2, 1]))
def testBatchShape(self, bias_variance, slope_variance,
shift, exponent, shape):
k = tfp.math.psd_kernels.Polynomial(
bias_variance=bias_variance,
slope_variance=slope_variance,
shift=shift,
exponent=exponent,
validate_args=True)
self.assertAllEqual(shape, k.batch_shape.as_list())
self.assertAllEqual(shape, self.evaluate(k.batch_shape_tensor()))
def testFloat32(self):
# No exception expected
k = tfp.math.psd_kernels.Polynomial(
bias_variance=0.,
slope_variance=1.,
shift=0.,
exponent=1.,
feature_ndims=1)
    x = np.ones([5, 3], np.float32)
import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass import DistributionND
class MVNormal(DistributionND):
"""
Multivariate normal distribution having probability density function
    .. math:: f(x) = \dfrac{1}{\sqrt{(2\pi)^k\det\Sigma}}\exp\left(-\dfrac{1}{2}(x-\mu)^T\Sigma^{-1}(x-\mu)\right)
where :math:`\mu` is the mean vector, :math:`\Sigma` is the covariance matrix, and :math:`k` is the dimension of
`x`.
**Inputs:**
* **mean** (`ndarray`):
mean vector, `ndarray` of shape `(dimension, )`
* **cov** (`float` or `ndarray`):
covariance, `float` or `ndarray` of shape `(dimension, )` or `(dimension, dimension)`. Default is 1.
The following methods are available for ``MVNormal``:
* ``cdf``, ``pdf``, ``log_pdf``, ``rvs``, ``fit``, ``moments``.
"""
def __init__(self, mean, cov=1.):
if mean is not None and cov is not None:
if len(np.array(mean).shape) != 1:
raise ValueError('Input mean must be a 1D array.')
if isinstance(cov, (int, float)):
pass
else:
if not (len(np.array(cov).shape) in [1, 2] and all(sh == len(mean) for sh in np.array(cov).shape)):
raise ValueError('Input cov must be a float or ndarray of appropriate dimensions.')
super().__init__(mean=mean, cov=cov, order_params=['mean', 'cov'])
def cdf(self, x):
cdf_val = stats.multivariate_normal.cdf(x=x, **self.params)
return np.atleast_1d(cdf_val)
def pdf(self, x):
pdf_val = stats.multivariate_normal.pdf(x=x, **self.params)
return np.atleast_1d(pdf_val)
def log_pdf(self, x):
logpdf_val = stats.multivariate_normal.logpdf(x=x, **self.params)
return np.atleast_1d(logpdf_val)
def rvs(self, nsamples=1, random_state=None):
if not (isinstance(nsamples, int) and nsamples >= 1):
raise ValueError('Input nsamples must be an integer > 0.')
return stats.multivariate_normal.rvs(
size=nsamples, random_state=random_state, **self.params).reshape((nsamples, -1))
def fit(self, data):
data = self._check_x_dimension(data)
mle_mu, mle_cov = self.params['mean'], self.params['cov']
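        # fit any unspecified parameters from data: sample mean and sample
        # covariance (np.cov defaults to the unbiased N-1 normalization)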
if mle_mu is None:
mle_mu = np.mean(data, axis=0)
if mle_cov is None:
            mle_cov = np.cov(data, rowvar=False)
"""
Purpose
-------
A Portfolio represents a collection of Aggregate objects. Applications include
* Model a book of insurance
* Model a large account with several sub lines
* Model a reinsurance portfolio or large treaty
"""
import collections
import json
import logging
from copy import deepcopy
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
from pandas.io.formats.format import EngFormatter
import pypandoc
import scipy.stats as ss
from scipy.interpolate import interp1d
from IPython.core.display import HTML, display
from matplotlib.ticker import MultipleLocator, StrMethodFormatter, MaxNLocator, FixedLocator, \
FixedFormatter, AutoMinorLocator
from scipy import interpolate
import re
from pathlib import Path
from .distr import Aggregate, Severity
from .spectral import Distortion
from .utils import ft, \
ift, sln_fit, sgamma_fit, \
axiter_factory, AxisManager, html_title, \
suptitle_and_tight, \
MomentAggregator, Answer, subsets, round_bucket, report_time
# fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}
matplotlib.rcParams['legend.fontsize'] = 'xx-small'
logger = logging.getLogger('aggregate')
# debug
# info
# warning
# error
# critical
class Portfolio(object):
"""
Portfolio creates and manages a portfolio of Aggregate objects.
:param name: the name of the portfolio, no spaces or underscores
:param spec_list: a list of 1) dictionary: Aggregate object dictionary specifications or
2) Aggregate: An actual aggregate objects or
3) tuple (type, dict) as returned by uw['name'] or
4) string: Names referencing objects in the optionally passed underwriter
"""
def __init__(self, name, spec_list, uw=None):
self.name = name
self.agg_list = []
self.line_names = []
logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name}')
# logger.debug(f'Portfolio.__init__| creating new Portfolio {self.name} at {super(Portfolio, self).__repr__()}')
ma = MomentAggregator()
max_limit = 0
for spec in spec_list:
if isinstance(spec, Aggregate):
# directly passed in an agg object
a = spec
agg_name = spec.name
elif isinstance(spec, str):
# look up object in uw return actual instance
# note here you could do uw.aggregate[spec] and get the dictionary def
# or uw(spec) to return the already-created (and maybe updated) object
# we go the latter route...if user wants they can pull off the dict item themselves
if uw is None:
raise ValueError(f'Must pass valid Underwriter instance to create aggs by name')
try:
a = uw(spec)
except e:
logger.error(f'Item {spec} not found in your underwriter')
raise e
agg_name = a.name
elif isinstance(spec, tuple):
# uw returns type, spec
assert spec[0] == 'agg'
a = Aggregate(**spec[1])
agg_name = spec[1]['name']
elif isinstance(spec, dict):
a = Aggregate(**spec)
agg_name = spec['name'][0] if isinstance(spec['name'], list) else spec['name']
else:
raise ValueError(f'Invalid type {type(spec)} passed to Portfolio, expect Aggregate, str or dict.')
self.agg_list.append(a)
self.line_names.append(agg_name)
self.__setattr__(agg_name, a)
ma.add_fs(a.report_ser[('freq', 'ex1')], a.report_ser[('freq', 'ex2')], a.report_ser[('freq', 'ex3')],
a.report_ser[('sev', 'ex1')], a.report_ser[('sev', 'ex2')], a.report_ser[('sev', 'ex3')])
max_limit = max(max_limit, np.max(np.array(a.limit)))
self.line_names_ex = self.line_names + ['total']
for n in self.line_names:
# line names cannot equal total
if n == 'total':
raise ValueError('Line names cannot equal total, it is reserved for...total')
# make a pandas data frame of all the statistics_df
temp_report = pd.concat([a.report_ser for a in self.agg_list], axis=1)
# max_limit = np.inf # np.max([np.max(a.get('limit', np.inf)) for a in spec_list])
temp = pd.DataFrame(ma.stats_series('total', max_limit, 0.999, remix=False))
self.statistics_df = pd.concat([temp_report, temp], axis=1)
# future storage
self.density_df = None
self.augmented_df = None
self.epd_2_assets = {}
self.assets_2_epd = {}
self.priority_capital_df = None
self.priority_analysis_df = None
self.audit_df = None
self.padding = 0
self.tilt_amount = 0
self._linear_quantile_function = None
self._cdf = None
self._pdf = None
self._tail_var = None
self._tail_var2 = None
self._inverse_tail_var = None
self.bs = 0
self.log2 = 0
self.ex = 0
self.last_update = 0
self.hash_rep_at_last_update = ''
self._distortion = None
self.sev_calc = ''
self._remove_fuzz = 0
self.approx_type = ""
self.approx_freq_ge = 0
self.discretization_calc = ''
# for storing the info about the quantile function
self.q_temp = None
self._renamer = None
self._line_renamer = None
self._tm_renamer = None
# if created by uw it stores the program here
self.program = ''
self.audit_percentiles = [.9, .95, .99, .996, .999, .9999, 1 - 1e-6]
self.dists = None
self.dist_ans = None
def __str__(self):
"""
Goal: readability
:return:
"""
# cannot use ex, etc. because object may not have been updated
if self.audit_df is None:
ex = self.statistics_df.loc[('agg', 'mean'), 'total']
empex = np.nan
isupdated = False
else:
ex = self.get_stat(stat="Mean")
empex = self.get_stat()
isupdated = True
# df = pd.DataFrame(columns=['Statistic', 'Value'])
# df = df.set_index('Statistic')
# df.loc['Portfolio Name', 'Value'] = self.name
# df.loc['Expected loss', 'Value'] = ex
# df.loc['Model loss', 'Value'] = empex
# df.loc['Error', 'Value'] = ex / empex - 1
# print(df)
s = f'Portfolio name {self.name:<15s}\n' \
f'Theoretic expected loss {ex:15,.1f}\n' \
f'Actual expected loss {empex:15,.1f}\n' \
f'Error {empex / ex - 1:15.6f}\n' \
f'Discretization size {self.log2:15d}\n' \
f'Bucket size {self.bs:15.2f}\n' \
f'{object.__repr__(self)}'
if not isupdated:
s += '\nNOT UPDATED!'
return s
@property
def distortion(self):
return self._distortion
def remove_fuzz(self, df=None, eps=0, force=False, log=''):
"""
        remove fuzz at threshold eps; if not passed, use np.finfo(float).eps.
Apply to self.density_df unless df is not None
Only apply if self.remove_fuzz or force
:param eps:
:param df: apply to dataframe df, default = self.density_df
:param force: do regardless of self.remove_fuzz
:return:
"""
if df is None:
df = self.density_df
if eps == 0:
            eps = np.finfo(float).eps
if self._remove_fuzz or force:
logger.debug(f'Portfolio.remove_fuzz | Removing fuzz from {self.name} dataframe, caller {log}')
df[df.select_dtypes(include=['float64']).columns] = \
df.select_dtypes(include=['float64']).applymap(lambda x: 0 if abs(x) < eps else x)
def __repr__(self):
"""
        Goal: unambiguous
:return:
"""
# return str(self.to_dict())
# this messes up when port = self has been enhanced...
if isinstance(self, Portfolio):
s = [super(Portfolio, self).__repr__(), f"{{ 'name': '{self.name}'"]
else:
s = [f'Non-Portfolio (enhanced) object {{ "name": "{self.name}"']
agg_list = [str({k: v for k, v in a.__dict__.items() if k in Aggregate.aggregate_keys})
for a in self.agg_list]
s.append(f"'spec': [{', '.join(agg_list)}]")
if self.bs > 0:
s.append(f'"bs": {self.bs}')
s.append(f'"log2": {self.log2}')
s.append(f'"padding": {self.padding}')
s.append(f'"tilt_amount": {self.tilt_amount}')
s.append(f'"distortion": "{repr(self._distortion)}"')
s.append(f'"sev_calc": "{self.sev_calc}"')
s.append(f'"remove_fuzz": {self._remove_fuzz}')
s.append(f'"approx_type": "{self.approx_type}"')
s.append(f'"approx_freq_ge": {self.approx_freq_ge}')
return ', '.join(s) + '}'
def _repr_html_(self):
s = [f'<h2>Portfolio object: {self.name}</h2>']
_n = len(self.agg_list)
_s = "" if _n <= 1 else "s"
s.append(f'Portfolio contains {_n} aggregate component{_s}')
summary_sl = (slice(None), ['mean', 'cv', 'skew'])
if self.audit_df is not None:
_df = pd.concat((self.statistics_df.loc[summary_sl, :],
self.audit_df[['Mean', 'EmpMean', 'MeanErr', 'CV', 'EmpCV', 'CVErr', 'P99.0']].T),
sort=True)
s.append(_df._repr_html_())
else:
s.append(self.statistics_df.loc[summary_sl, :]._repr_html_())
return '\n'.join(s)
def __hash__(self):
"""
hashing behavior
:return:
"""
return hash(repr(self.__dict__))
def __iter__(self):
"""
make Portfolio iterable: for each x in Portfolio
:return:
"""
return iter(self.agg_list)
def __getitem__(self, item):
"""
allow Portfolio[slice] to return bits of agg_list
:param item:
:return:
"""
if type(item) == str:
return self.agg_list[self.line_names.index(item)]
return self.agg_list[item]
@property
def audit(self):
"""
Renamed version of the audit dataframe
:return:
"""
if self.audit_df is not None:
return self.audit_df.rename(columns=self.renamer, index=self.line_renamer).T
@property
def density(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.density_df is not None:
return self.density_df.rename(columns=self.renamer)
@property
def augmented(self):
"""
Renamed version of the density_df dataframe
:return:
"""
if self.augmented_df is not None:
return self.augmented_df.rename(columns=self.renamer)
@property
def statistics(self):
"""
Renamed version of the statistics dataframe
:return:
"""
return self.statistics_df.rename(columns=self.renamer)
def json(self, stream=None):
"""
write object as json
:param stream:
:return: stream or text
"""
args = dict()
args["bs"] = self.bs
args["log2"] = self.log2
args["padding"] = self.padding
args["tilt_amount"] = self.tilt_amount
args["distortion"] = repr(self._distortion)
args["sev_calc"] = self.sev_calc
args["remove_fuzz"] = self._remove_fuzz
args["approx_type"] = self.approx_type
args["approx_freq_ge"] = self.approx_freq_ge
args["last_update"] = str(self.last_update)
args["hash_rep_at_last_update"] = str(self.hash_rep_at_last_update)
d = dict()
# original
# d[self.name] = dict(args=args, spec=[a.spec for a in self.agg_list])
d['name'] = self.name
d['args'] = args
d['spec_list'] = [a._spec for a in self.agg_list]
        logger.debug(f'Portfolio.json | dumping {self.name} to {stream}')
s = json.dumps(d) # , default_flow_style=False, indent=4)
logger.debug(f'Portfolio.json | {s}')
if stream is None:
return s
else:
return stream.write(s)
def save(self, filename='', mode='a'):
"""
persist to json in filename; if none save to user.json
:param filename:
:param mode: for file open
:return:
"""
if filename == "":
filename = Path.home() / 'agg/user.json'
filename.parent.mkdir(parents=True, exist_ok=True)
with filename.open(mode=mode, encoding='utf-8') as f:
self.json(stream=f)
logger.debug(f'Portfolio.save | {self.name} saved to {filename}')
def __add__(self, other):
"""
Add two portfolio objects INDEPENDENT sum (down road can look for the same severity...)
:param other:
:return:
"""
assert isinstance(other, Portfolio)
new_spec = []
for a in self.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
for a in other.agg_list:
c = deepcopy(a._spec)
c['name'] = c['name']
new_spec.append(c)
return Portfolio(f'({self.name}) + ({other.name})', new_spec)
def __rmul__(self, other):
"""
new = other * self; treat as scale change
:param other:
:return:
"""
assert other > 0
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the severity
s = d['severity']
if 'mean' in s:
s['mean'] *= other
elif 'scale' in s:
s['scale'] *= other
else:
raise ValueError(f"Cannot adjust s['name'] for scale")
return Portfolio(f'{other} x {self.name}', new_spec)
def __mul__(self, other):
"""
new = self * other, other integer, sum of other independent copies
:param other:
:return:
"""
assert isinstance(other, int)
new_spec = []
for a in self.agg_list:
new_spec.append(deepcopy(a._spec))
for d in new_spec:
# d is a dictionary agg spec, need to adjust the frequency
# TODO better freq dists; deal with Bernoulli where n=log<1
d['frequency']['n'] *= other
return Portfolio(f'Sum of {other} copies of {self.name}', new_spec)
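    # Usage sketch: `2 * port` rescales every severity by 2 (e.g. a currency
    # change), while `port * 2` doubles every frequency, i.e. sums two
    # independent copies of the book.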
def snap(self, x):
"""
snap value x to the index of density_df
:param x:
:return:
"""
ix = self.density_df.index.get_loc(x, 'nearest')
return self.density_df.iat[ix, 0]
def audits(self, kind='all', **kwargs):
"""
produce audit plots to assess accuracy of outputs.
Currently only exeqa available
:param kind:
:param kwargs: passed to pandas plot, e.g. set xlim
:return:
"""
if kind == 'all':
kind = ['exeqa']
for k in kind:
if k == 'exeqa':
temp = self.density_df.filter(regex='exeqa_.*(?<!total)$').copy()
temp['sum'] = temp.sum(axis=1)
temp['err'] = temp['sum'] - temp.index
f, axs = plt.subplots(1, 2, figsize=(8, 3.75), constrained_layout=True)
ax = axs.flatten()
a = temp['err'].abs().plot(logy=True, title=f'Exeqa Sum Error', ax=ax[1], **kwargs)
a.plot(self.density_df.loss, self.density_df.p_total, label='p_total')
a.plot(self.density_df.loss, self.density_df.p_total * temp.err, label='prob wtd err')
a.grid('b')
a.legend(loc='lower left')
if 'xlim' in kwargs:
kwargs['ylim'] = kwargs['xlim']
temp.filter(regex='exeqa_.*(?<!total)$|sum').plot(title='exeqa and sum of parts', ax=ax[0],
**kwargs).grid('b')
f.suptitle(f'E[Xi | X=x] vs. Sum of Parts\nbs={self.bs}, log2={self.log2}, padding={self.padding}',
fontsize='x-large')
return f # for doc maker
def get_stat(self, line='total', stat='EmpMean'):
"""
        Other analysis suggests that iloc and iat are about the same speed but slower than ix
:param line:
:param stat:
:return:
"""
return self.audit_df.loc[line, stat]
def q(self, p, kind='lower'):
"""
return lowest quantile, appropriate for discrete bucketing.
quantile guaranteed to be in the index
nearest does not work because you always want to pick rounding up
Definition 2.1 (Quantiles)
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] ≥ α} is the lower α-quantile of X
x(α) = qα(X) = inf{x ∈ R : P[X ≤ x] > α} is the upper α-quantile of X.
        We use the x-notation if the dependence on X is evident, otherwise the q-notation.
Acerbi and Tasche (2002)
:param p:
:param kind: allow upper or lower quantiles
:return:
"""
if self._linear_quantile_function is None:
# revised Dec 2019
self._linear_quantile_function = {}
self.q_temp = self.density_df[['loss', 'F']].groupby('F').agg({'loss': np.min})
self.q_temp.loc[1, 'loss'] = self.q_temp.loss.iloc[-1]
self.q_temp.loc[0, 'loss'] = 0
# revised Jan 2020
# F loss loss_s
# 0.000000 0.0 0.0
# 0.667617 0.0 4500.0
# a value here is V and ^ which is the same: correct
# 0.815977 4500.0 5500.0
# 0.937361 5500.0 9000.0
# upper and lower only differ at exact values of F where lower is loss and upper is loss_s
# in between must take the next value for lower and the previous value for next to get the same answer
self.q_temp = self.q_temp.sort_index()
# that q_temp left cts, want right continuous:
self.q_temp['loss_s'] = self.q_temp.loss.shift(-1)
self.q_temp.iloc[-1, 1] = self.q_temp.iloc[-1, 0]
# create interp functions
# old
# self._linear_quantile_function['upper'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# self._linear_quantile_function['lower'] = \
# interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='previous', bounds_error=False,
# fill_value='extrapolate')
# revised
self._linear_quantile_function['upper'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='previous', bounds_error=False,
fill_value='extrapolate')
self._linear_quantile_function['lower'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss, kind='next', bounds_error=False,
fill_value='extrapolate')
# change to using loss_s
self._linear_quantile_function['middle'] = \
interpolate.interp1d(self.q_temp.index, self.q_temp.loss_s, kind='linear', bounds_error=False,
fill_value='extrapolate')
l = float(self._linear_quantile_function[kind](p))
# because we are not interpolating the returned value must (should) be in the index...
assert kind == 'middle' or l in self.density_df.index
return l
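    # Usage sketch (hypothetical updated portfolio ``port``):
    # v_lo = port.q(0.99)            # lower quantile, guaranteed in the index
    # v_up = port.q(0.99, 'upper')   # differs from lower only at jumps of F
    # v_md = port.q(0.99, 'middle')  # interpolated; need not lie in the index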
def cdf(self, x):
"""
distribution function
:param x:
:return:
"""
if self._cdf is None:
# Dec 2019: kind='linear' --> kind='previous'
self._cdf = interpolate.interp1d(self.density_df.loss, self.density_df.F, kind='previous',
bounds_error=False, fill_value='extrapolate')
return self._cdf(x)
def sf(self, x):
"""
survival function
:param x:
:return:
"""
return 1 - self.cdf(x)
def pdf(self, x):
"""
probability density function, assuming a continuous approximation of the bucketed density
:param x:
:return:
"""
if self._pdf is None:
self._pdf = interpolate.interp1d(self.density_df.loss, self.density_df.p_total, kind='linear',
bounds_error=False, fill_value='extrapolate')
return self._pdf(x) / self.bs
# # make some handy aliases; delete these go strictly with scipy.stats notation
# def F(self, x):
# """
# handy alias for distribution, CDF
# :param x:
# :return:
# """
# return self.cdf(x)
#
# def S(self, x):
# """
# handy alias for survival function, S
# :param x:
# :return:
# """
# return self.sf(x)
def var(self, p):
"""
value at risk = alias for quantile function
:param p:
:return:
"""
return self.q(p)
def tvar(self, p, kind='interp'):
"""
        Compute the tail value at risk at threshold p.
        Really this function returns ES (expected shortfall).
        Definition 2.6 (Tail mean and Expected Shortfall)
        Assume E[X−] < ∞. Then
        x¯(α) = TM_α(X) = α^{−1}( E[X 1{X≤x(α)}] + x(α) (α − P[X ≤ x(α)]) )
        is the α-tail mean of X at level α.
        Acerbi and Tasche (2002)
        We are interested in the right-hand exceedance version [note > vs ≥]
        (1 − α)^{−1}( E[X 1{X > x(α)}] + x(α) (P[X ≤ x(α)] − α) ),
        which is what the 'tail' calculation below implements.
        McNeil et al. pp. 66-70: this follows from the definition of ES as an
        integral of the quantile function.
:param p:
:param kind: 'interp' = interpolate exgta_total; 'tail' tail integral, 'body' NYI - (ex - body integral)/(1-p)+v
'inverse' from capital to p using interp method
:return:
"""
assert self.density_df is not None
if kind == 'tail':
# original
# _var = self.q(p)
# ex = self.density_df.loc[_var + self.bs:, ['p_total', 'loss']].product(axis=1).sum()
# pip = (self.density_df.loc[_var, 'F'] - p) * _var
# t_var = 1 / (1 - p) * (ex + pip)
# return t_var
# revised
if self._tail_var2 is None:
self._tail_var2 = self.density_df[['p_total', 'loss']].product(axis=1).iloc[::-1].cumsum().iloc[::-1]
_var = self.q(p)
ex = self._tail_var2.loc[_var + self.bs]
pip = (self.density_df.loc[_var, 'F'] - p) * _var
t_var = 1 / (1 - p) * (ex + pip)
return t_var
elif kind == 'interp':
# original implementation interpolated
if self._tail_var is None:
# make tvar function
sup = (self.density_df.p_total[::-1] > 0).idxmax()
if sup == self.density_df.index[-1]:
sup = np.inf
_x = self.density_df.F
_y = self.density_df.exgta_total
else:
_x = self.density_df.F.values[:self.density_df.index.get_loc(sup)]
_y = self.density_df.exgta_total.values[:self.density_df.index.get_loc(sup)]
p0 = self.density_df.at[0., 'F']
if p0 > 0:
ps = np.linspace(0, p0, 200, endpoint=False)
tempx = np.hstack((ps, _x))
tempy = np.hstack((self.ex / (1-ps), _y))
self._tail_var = interpolate.interp1d(tempx, tempy,
kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
else:
self._tail_var = interpolate.interp1d(_x, _y, kind='linear', bounds_error=False,
fill_value=(self.ex, sup))
            if isinstance(p, (float, np.floating)):
return float(self._tail_var(p))
else:
return self._tail_var(p)
elif kind == 'inverse':
if self._inverse_tail_var is None:
# make tvar function
self._inverse_tail_var = interpolate.interp1d(self.density_df.exgta_total, self.density_df.F,
kind='linear', bounds_error=False,
fill_value='extrapolate')
            if isinstance(p, (int, np.integer, float, np.floating)):
return float(self._inverse_tail_var(p))
else:
return self._inverse_tail_var(p)
else:
            raise ValueError(f'Inadmissible kind {kind} passed to tvar; options are interp (default), inverse, or tail')
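    # Usage sketch (hypothetical updated portfolio ``port``); the two kinds
    # should agree up to discretization error:
    # es_interp = port.tvar(0.99)        # interpolates exgta_total
    # es_tail = port.tvar(0.99, 'tail')  # direct tail integral of the density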
def tvar_threshold(self, p, kind):
"""
Find the value pt such that TVaR(pt) = VaR(p) using numerical Newton Raphson
"""
a = self.q(p, kind)
def f(p):
return self.tvar(p) - a
loop = 0
p1 = 1 - 2 * (1 - p)
fp1 = f(p1)
delta = 1e-5
while abs(fp1) > 1e-6 and loop < 10:
df1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / df1
fp1 = f(p1)
loop += 1
if loop == 10:
raise ValueError(f'Difficulty computing TVaR to match VaR at p={p}')
return p1
def equal_risk_var_tvar(self, p_v, p_t):
"""
solve for equal risk var and tvar: find pv and pt such that sum of
individual line VaR/TVaR at pv/pt equals the VaR(p) or TVaR(p_t)
these won't return elements in the index because you have to interpolate
hence using kind=middle
"""
# these two should obviously be the same
target_v = self.q(p_v, 'middle')
target_t = self.tvar(p_t)
def fv(p):
return sum([float(a.q(p, 'middle')) for a in self]) - target_v
def ft(p):
return sum([float(a.tvar(p)) for a in self]) - target_t
ans = np.zeros(2)
for i, f in enumerate([fv, ft]):
p1 = 1 - 2 * (1 - (p_v if i == 0 else p_t))
fp1 = f(p1)
loop = 0
delta = 1e-5
            while abs(fp1) > 1e-6 and loop < 100:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 100:
raise ValueError(f'Trouble finding equal risk {"TVaR" if i else "VaR"} at p_v={p_v}, p_t={p_t}. '
'No convergence after 100 iterations. ')
ans[i] = p1
return ans
def equal_risk_epd(self, a):
"""
determine the common epd threshold so sum sa equals a
"""
def f(p):
return sum([self.epd_2_assets[(l, 0)](p) for l in self.line_names]) - a
p1 = self.assets_2_epd[('total', 0)](a)
fp1 = f(p1)
loop = 0
delta = 1e-5
        while abs(fp1) > 1e-6 and loop < 100:
dfp1 = (f(p1 + delta) - fp1) / delta
p1 = p1 - fp1 / dfp1
fp1 = f(p1)
loop += 1
if loop == 100:
            raise ValueError(f'Trouble finding equal risk EPD at a={a}. No convergence after 100 iterations. ')
return p1
def merton_perold(self, p, kind='lower'):
"""
        compute Merton-Perold capital allocation at VaR(p) capital using VaR as risk measure
        v = q(p)
        TODO TVaR version of Merton-Perold
"""
# figure total assets
a = self.q(p, kind)
# shorthand abbreviation
df = self.density_df
loss = df.loss
ans = []
total = 0
for l in self.line_names:
            # VaR_p of the portfolio excluding line l
            q = self.density_df.loss.iloc[np.searchsorted(self.density_df[f'ημ_{l}'].cumsum(), p, side='right')]
diff = a - q
ans.append(diff)
total += diff
ans.append(total)
return ans
def cotvar(self, p):
"""
make the p co-tvar asset allocation using ISA
Asset alloc = exgta = tail expected value, treating TVaR like a pricing variable
"""
av = self.q(p)
return self.density_df.loc[av, [f'exgta_{l}' for l in self.line_names_ex]].values
def as_severity(self, limit=np.inf, attachment=0, conditional=False):
"""
convert into a severity without recomputing
throws error if self not updated
:param limit:
:param attachment:
:param conditional:
:return:
"""
if self.density_df is None:
raise ValueError('Must update prior to converting to severity')
return Severity(sev_name=self, sev_a=self.log2, sev_b=self.bs,
exp_attachment=attachment, exp_limit=limit, sev_conditional=conditional)
def fit(self, approx_type='slognorm', output='agg'):
"""
returns a dictionary specification of the portfolio aggregate_project
if updated uses empirical moments, otherwise uses theoretic moments
:param approx_type: slognorm | sgamma
:param output: return a dict or agg language specification
:return:
"""
if self.audit_df is None:
# not updated
m = self.statistics_df.loc[('agg', 'mean'), 'total']
cv = self.statistics_df.loc[('agg', 'cv'), 'total']
skew = self.statistics_df.loc[('agg', 'skew'), 'total']
else:
# use statistics_df matched to computed aggregate_project
m, cv, skew = self.audit_df.loc['total', ['EmpMean', 'EmpCV', 'EmpSkew']]
name = f'{approx_type[0:4]}~{self.name[0:5]}'
agg_str = f'agg {name} 1 claim sev '
if approx_type == 'slognorm':
shift, mu, sigma = sln_fit(m, cv, skew)
# self.fzapprox = ss.lognorm(sigma, scale=np.exp(mu), loc=shift)
sev = {'sev_name': 'lognorm', 'sev_shape': sigma, 'sev_scale': np.exp(mu), 'sev_loc': shift}
agg_str += f'{np.exp(mu)} * lognorm {sigma} + {shift} '
elif approx_type == 'sgamma':
shift, alpha, theta = sgamma_fit(m, cv, skew)
# self.fzapprox = ss.gamma(alpha, scale=theta, loc=shift)
sev = {'sev_name': 'gamma', 'sev_a': alpha, 'sev_scale': theta, 'sev_loc': shift}
            agg_str += f'{theta} * gamma {alpha} + {shift} '
else:
raise ValueError(f'Inadmissible approx_type {approx_type} passed to fit')
if output == 'agg':
agg_str += ' fixed'
return agg_str
else:
return {'name': name, 'note': f'frozen version of {self.name}', 'exp_en': 1, **sev, 'freq_name': 'fixed'}
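    # Usage sketch (hypothetical names): ``port.fit('slognorm')`` returns an agg
    # language string such as
    # 'agg slog~MyPor 1 claim sev 123.4 * lognorm 0.75 + 56.7  fixed'
    # while ``port.fit('sgamma', output='dict')`` returns the equivalent spec dict.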
def collapse(self, approx_type='slognorm'):
"""
returns new Portfolio with the fit
Deprecated...prefer uw(self.fit()) to go through the agg language approach
:param approx_type: slognorm | sgamma
:return:
"""
spec = self.fit(approx_type, output='dict')
logger.debug(f'Portfolio.collapse | Collapse created new Portfolio with spec {spec}')
logger.warning(f'Portfolio.collapse | Collapse is deprecated; use fit() instead.')
return Portfolio(f'Collapsed {self.name}', [spec])
def percentiles(self, pvalues=None):
"""
        report on percentiles and large losses
        uses interpolation, audit_df uses nearest
        :param pvalues: optional vector of percentile values to use. If None, sensible defaults provided
        :return: DataFrame of percentiles indexed by line and p-value
"""
df = pd.DataFrame(columns=['line', 'log', 'Agg Quantile'])
df = df.set_index(['line', 'log'])
# df.columns.name = 'perspective'
if pvalues is None:
pvalues = [0.5, 0.75, 0.8, 0.85, 0.9, 0.95, 0.98, 0.99, 0.994, 0.995, 0.999, 0.9999]
for line in self.line_names_ex:
q_agg = interpolate.interp1d(self.density_df[f'p_{line}'].cumsum(), self.density_df.loss,
kind='linear', bounds_error=False, fill_value='extrapolate')
for p in pvalues:
qq = q_agg(p)
df.loc[(line, p), :] = [float(qq)]
df = df.unstack(level=1)
return df
def recommend_bucket(self):
"""
data to help estimate a good bucket size
:return:
"""
df = pd.DataFrame(columns=['line', 'bs10'])
df = df.set_index('line')
for a in self.agg_list:
df.loc[a.name, :] = [a.recommend_bucket(10)]
df['bs11'] = df['bs10'] / 2
df['bs12'] = df['bs10'] / 4
df['bs13'] = df['bs10'] / 8
df['bs14'] = df['bs10'] / 16
df['bs15'] = df['bs10'] / 32
df['bs16'] = df['bs10'] / 64
df['bs17'] = df['bs10'] / 128
df['bs18'] = df['bs10'] / 256
        df['bs19'] = df['bs10'] / 512
df['bs20'] = df['bs10'] / 1024
df.loc['total', :] = df.sum()
return df
def best_bucket(self, log2=16):
bs = sum([a.recommend_bucket(log2) for a in self])
return round_bucket(bs)
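    # Usage sketch: pick a bucket size and update in one go (hypothetical ``port``):
    # bs = port.best_bucket(log2=16)
    # port.update(log2=16, bs=bs)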
def update(self, log2, bs, approx_freq_ge=100, approx_type='slognorm', remove_fuzz=False,
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1, tilt_amount=0, epds=None,
trim_df=False, verbose=False, add_exa=True, aggregate_cession_function=None):
"""
create density_df, performs convolution. optionally adds additional information if ``add_exa=True``
for allocation and priority analysis
tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
(ASTIN 1999)
        tilt_amount x num_buckets < 20 is recommended (see p. 210 of the reference)
        num buckets and max loss are determined from the bucket size
:param log2:
:param bs: bucket size
:param approx_freq_ge: use method of moments if frequency is larger than ``approx_freq_ge``
:param approx_type: type of method of moments approx to use (slognorm or sgamma)
:param remove_fuzz: remove machine noise elements from FFT
:param sev_calc: how to calculate the severity, discrete (point masses as xs) or continuous (uniform between xs points)
:param discretization_calc: survival or distribution (accurate on right or left tails)
        :param normalize: if true, normalize the severity so sum probs = 1. This is generally what you want; but
            it can be preferable to turn off for thick-tailed severities that are materially truncated by the bucketing
:param padding: for fft 1 = double, 2 = quadruple
        :param tilt_amount: for tilting methodology - see notes on density for suggested parameters
:param epds: epd points for priority analysis; if None-> sensible defaults
:param trim_df: remove unnecessary columns from density_df before returning
:param verbose: level of output
:param add_exa: run add_exa to append additional allocation information needed for pricing; if add_exa also add
epd info
:param aggregate_cession_function: function of Portfolio object that adjusts individual line densities; applied
after line aggs created but before creating not-lines; actual statistics do not reflect impact.
:return:
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.tilt_amount = tilt_amount
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = remove_fuzz
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
if self.hash_rep_at_last_update == hash(self):
logger.warning(f'Nothing has changed since last update at {self.last_update}')
return
self._linear_quantile_function = None
ft_line_density = {}
# line_density = {}
# not_line_density = {}
# add the densities
# tilting: [@Grubel1999]: Computation of Compound Distributions I: Aliasing Errors and Exponential Tilting
# (ASTIN 1999)
        # tilt_amount x num_buckets < 20 recommended (see p. 210)
# num buckets and max loss from bucket size
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
# make all the single line aggs
# note: looks like duplication but will all be references
# easier for add_exa to have as part of the portfolio module
# tilt
if self.tilt_amount != 0:
tilt_vector = np.exp(self.tilt_amount * np.arange(N))
else:
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update(xs, self.padding, tilt_vector, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize, verbose=verbose)
if verbose:
display(_a)
if aggregate_cession_function is not None:
aggregate_cession_function(agg, self.padding, tilt_vector)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
# ft_line_density['total'] = ft_all
# make the not self.line_density = sum of all but the given line
# have the issue here that if you divide and the dist
# is symmetric then you get a div zero...
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
self.remove_fuzz(log='update')
# make audit statistics_df df
theoretical_stats = self.statistics_df.T.filter(regex='agg')
theoretical_stats.columns = ['EX1', 'EX2', 'EX3', 'Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']
theoretical_stats = theoretical_stats[['Mean', 'CV', 'Skew', 'Limit', 'P99.9Est']]
# self.audit_percentiles = [0.9, 0.95, 0.99, 0.995, 0.996, 0.999, 0.9999, 1 - 1e-6]
self.audit_df = pd.DataFrame(
columns=['Sum probs', 'EmpMean', 'EmpCV', 'EmpSkew', "EmpKurt", 'EmpEX1', 'EmpEX2', 'EmpEX3'] +
['P' + str(100 * i) for i in self.audit_percentiles])
for col in self.line_names_ex:
sump = np.sum(self.density_df[f'p_{col}'])
t = self.density_df[f'p_{col}'] * self.density_df['loss']
ex1 = np.sum(t)
t *= self.density_df['loss']
ex2 = np.sum(t)
t *= self.density_df['loss']
ex3 = np.sum(t)
t *= self.density_df['loss']
ex4 = np.sum(t)
m, cv, s = MomentAggregator.static_moments_to_mcvsk(ex1, ex2, ex3)
# empirical kurtosis
kurt = (ex4 - 4 * ex3 * ex1 + 6 * ex1 ** 2 * ex2 - 3 * ex1 ** 4) / ((m * cv) ** 4) - 3
ps = np.zeros((len(self.audit_percentiles)))
temp = self.density_df[f'p_{col}'].cumsum()
for i, p in enumerate(self.audit_percentiles):
ps[i] = (temp > p).idxmax()
newrow = [sump, m, cv, s, kurt, ex1, ex2, ex3] + list(ps)
self.audit_df.loc[col, :] = newrow
self.audit_df = pd.concat((theoretical_stats, self.audit_df), axis=1, sort=True)
self.audit_df['MeanErr'] = self.audit_df['EmpMean'] / self.audit_df['Mean'] - 1
self.audit_df['CVErr'] = self.audit_df['EmpCV'] / self.audit_df['CV'] - 1
self.audit_df['SkewErr'] = self.audit_df['EmpSkew'] / self.audit_df['Skew'] - 1
# add exa details
if add_exa:
self.add_exa(self.density_df, details=True)
# default priority analysis
logger.debug('Adding EPDs in Portfolio.update')
if epds is None:
epds = np.hstack(
[np.linspace(0.5, 0.1, 4, endpoint=False)] +
[np.linspace(10 ** -n, 10 ** -(n + 1), 9, endpoint=False) for n in range(1, 7)])
epds = np.round(epds, 7)
self.priority_capital_df = pd.DataFrame(index=pd.Index(epds))
for col in self.line_names:
for i in range(3):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](
epds)
col = 'not ' + col
for i in range(2):
self.priority_capital_df['{:}_{:}'.format(col, i)] = self.epd_2_assets[(col, i)](epds)
self.priority_capital_df['{:}_{:}'.format('total', 0)] = self.epd_2_assets[('total', 0)](epds)
self.priority_capital_df.columns = self.priority_capital_df.columns.str.split("_", expand=True)
self.priority_capital_df.sort_index(axis=1, level=1, inplace=True)
self.priority_capital_df.sort_index(axis=0, inplace=True)
else:
# at least want F and S to get quantile functions
self.density_df['F'] = np.cumsum(self.density_df.p_total)
self.density_df['S'] = 1 - self.density_df.F
self.ex = self.audit_df.loc['total', 'EmpMean']
self.last_update = np.datetime64('now')
self.hash_rep_at_last_update = hash(self)
if trim_df:
self.trim_df()
# invalidate stored functions
self._linear_quantile_function = None
self.q_temp = None
self._cdf = None
def update_efficiently(self, log2, bs, approx_freq_ge=100, approx_type='slognorm',
sev_calc='discrete', discretization_calc='survival', normalize=True, padding=1):
"""
runs stripped down versions of update and add_exa - bare bones
code copied from those routines and cleaned for comments etc.
        :param log2:
        :param bs:
        :param approx_freq_ge:
        :param approx_type:
        :param sev_calc:
        :param discretization_calc:
        :param normalize:
        :param padding:
        :return:
"""
self.log2 = log2
self.bs = bs
self.padding = padding
self.approx_type = approx_type
self.sev_calc = sev_calc
self._remove_fuzz = True
self.approx_type = approx_type
self.approx_freq_ge = approx_freq_ge
self.discretization_calc = discretization_calc
ft_line_density = {}
N = 1 << log2
MAXL = N * bs
xs = np.linspace(0, MAXL, N, endpoint=False)
# no tilt for efficient mode
tilt_vector = None
# where the answer will live
self.density_df = pd.DataFrame(index=xs)
self.density_df['loss'] = xs
ft_all = None
for agg in self.agg_list:
raw_nm = agg.name
nm = f'p_{agg.name}'
_a = agg.update_efficiently(xs, self.padding, 'exact' if agg.n < approx_freq_ge else approx_type,
sev_calc, discretization_calc, normalize)
ft_line_density[raw_nm] = agg.ftagg_density
self.density_df[nm] = agg.agg_density
if ft_all is None:
ft_all = np.copy(ft_line_density[raw_nm])
else:
ft_all *= ft_line_density[raw_nm]
self.density_df['p_total'] = np.real(ift(ft_all, self.padding, tilt_vector))
# make the not self.line_density = sum of all but the given line
ft_nots = {}
for line in self.line_names:
ft_not = np.ones_like(ft_all)
if np.any(ft_line_density[line] == 0):
# have to build up
for not_line in self.line_names:
if not_line != line:
ft_not *= ft_line_density[not_line]
else:
if len(self.line_names) > 1:
ft_not = ft_all / ft_line_density[line]
self.density_df[f'ημ_{line}'] = np.real(ift(ft_not, self.padding, tilt_vector))
ft_nots[line] = ft_not
self.remove_fuzz(log='update_efficiently')
# no audit statistics_df
# BEGIN add_exa ================================================================================================
# add exa details now in-line
# def add_exa(self, df, details, ft_nots=None):
# Call is self.add_exa(self.density_df, details=True)
# name in add_exa, keeps code shorter
df = self.density_df
        cut_eps = np.finfo(float).eps
# sum of p_total is so important...we will rescale it...
        if not np.all(df.p_total >= 0):
            # have negative densities (FFT noise); log where they start and the total probability
            first_neg = np.argwhere((df.p_total < 0).to_numpy()).min()
            logger.warning(f'update_efficiently | p_total has negative values starting at index {first_neg}; '
                           f'sum p_total = {df.p_total.sum():.8f}')
df['F'] = np.cumsum(df.p_total)
df['S'] = \
df.p_total.shift(-1, fill_value=min(df.p_total.iloc[-1], max(0, 1. - (df.p_total.sum()))))[::-1].cumsum()[::-1]
# E(min(X, a))
# df['exa_total'] = self.cumintegral(df['S'])
df['exa_total'] = df.S.shift(1, fill_value=0).cumsum() * self.bs
df['lev_total'] = df['exa_total']
df['exlea_total'] = \
(df.exa_total - df.loss * df.S) / df.F
n_ = df.shape[0]
if n_ < 1100:
mult = 1
elif n_ < 15000:
mult = 10
else:
mult = 100
loss_max = df[['loss', 'exlea_total']].query(' exlea_total>loss ').loss.max()
if np.isnan(loss_max):
loss_max = 0
else:
loss_max += mult * bs
# try nan in place of 0 V
df.loc[0:loss_max, 'exlea_total'] = np.nan
df['e_total'] = | np.sum(df.p_total * df.loss) | numpy.sum |
import numpy as np
import scipy.io as sio
import torch.utils.data
from torch.utils.data import DataLoader
import pdb
class NeuralData(torch.utils.data.Dataset):
def __init__(self, data, data2, num_trials_per_class=91):
self.data = data
self.data2 = data2
self.num_trials_per_class = num_trials_per_class
self.size = data.shape[0]
def __getitem__(self, index):
input1_data = self.data[index]
input2_data = self.data2[index]
target = index // self.num_trials_per_class
return input1_data, input2_data, target
def __len__(self):
return self.size
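# Usage sketch (hypothetical trial-by-neuron rate arrays rates_a, rates_b):
# dataset = NeuralData(rates_a, rates_b, num_trials_per_class=91)
# loader = DataLoader(dataset, batch_size=32, shuffle=True)
# input1, input2, target = next(iter(loader))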
def break_correlations(data):
    # data is a T x N matrix (trials x neurons); permute each neuron's column
    # across trials independently to break single-trial correlations
permuted_data = np.zeros_like(data)
for i in range(data.shape[1]):
permuted_data[:, i] = np.random.permutation(data[:, i])
return permuted_data
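# Usage sketch: decode on real vs. correlation-broken data to quantify the
# contribution of noise correlations (``trials`` is a hypothetical T x N matrix):
# shuffled = break_correlations(trials)  # marginals kept, co-fluctuations destroyed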
def get_neural_nocorr_loader(workers=0, batch_size=10, time1=None, time2=None, deltat=None):
data = sio.loadmat('data/ps4_realdata.mat') # load the .mat file.
NumTrainData = data['train_trial'].shape[0]
NumClass = data['train_trial'].shape[1]
NumTestData = data['test_trial'].shape[0]
trainDataArr = np.zeros((NumClass, NumTrainData, 97)) # contains the firing rates for all neurons on all 8 x 91 trials in the training set
testDataArr = np.zeros((NumClass, NumTestData, 97)) # for the testing set.
for classIX in range(NumClass):
for trainDataIX in range(NumTrainData):
trainDataArr[classIX, trainDataIX, :] = | np.sum(data['train_trial'][trainDataIX, classIX][1][:, 350:550], 1) | numpy.sum |
# MIT License
#
# Copyright (c) 2020 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for the python API for tsdate.
"""
import unittest
import collections
import json
import warnings
import math
import numpy as np
import scipy
import msprime
import tsinfer
import tskit
import tsdate
from tsdate.base import NodeGridValues
from tsdate.prior import (SpansBySamples, PriorParams, ConditionalCoalescentTimes,
fill_priors, gamma_approx)
from tsdate.date import (Likelihoods, LogLikelihoods, LogLikelihoodsStreaming,
InOutAlgorithms, posterior_mean_var, constrain_ages_topo,
get_dates, date)
from tsdate.util import nodes_time
import utility_functions
class TestBasicFunctions(unittest.TestCase):
"""
Test for some of the basic functions used in tsdate
"""
def test_alpha_prob(self):
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 3), 1.)
self.assertEqual(ConditionalCoalescentTimes.m_prob(2, 2, 4), 0.25)
def test_tau_expect(self):
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 10), 1.8)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(10, 100), 0.09)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(100, 100), 1.98)
self.assertEqual(ConditionalCoalescentTimes.tau_expect(5, 10), 0.4)
def test_tau_squared_conditional(self):
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(1, 10), 4.3981418)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_squared_conditional(100, 100),
-4.87890977e-18)
def test_tau_var(self):
self.assertEqual(
ConditionalCoalescentTimes.tau_var(2, 2), 1)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(10, 20), 0.0922995960)
self.assertAlmostEqual(
ConditionalCoalescentTimes.tau_var(50, 50), 1.15946186)
def test_gamma_approx(self):
self.assertEqual(gamma_approx(2, 1), (4., 2.))
self.assertEqual(gamma_approx(0.5, 0.1), (2.5, 5.0))
class TestNodeTipWeights(unittest.TestCase):
def verify_weights(self, ts):
span_data = SpansBySamples(ts)
# Check all non-sample nodes in a tree are represented
nonsample_nodes = collections.defaultdict(float)
for tree in ts.trees():
for n in tree.nodes():
if not tree.is_sample(n):
# do not count a span of a node where there are no sample descendants
nonsample_nodes[n] += (tree.span if tree.num_samples(n) > 0 else 0)
self.assertEqual(set(span_data.nodes_to_date), set(nonsample_nodes.keys()))
for id, span in nonsample_nodes.items():
self.assertAlmostEqual(span, span_data.node_spans[id])
for focal_node in span_data.nodes_to_date:
wt = 0
for _, weights in span_data.get_weights(focal_node).items():
self.assertTrue(0 <= focal_node < ts.num_nodes)
wt += | np.sum(weights['weight']) | numpy.sum |
from assembler import Assembler
from assembler import Form
from assembler import Kernel
from diagnostics import Verbose
from fem import QuadFE
from fem import DofHandler
from fem import Basis
from function import Constant
from function import Explicit
from function import Map
from function import Nodal
from gmrf import Covariance
from gmrf import GaussianField
from mesh import QuadMesh
from mesh import Mesh1D
from plot import Plot
from solver import LS
import numpy as np
from scipy import linalg as la
from scipy.stats import norm
import scipy.sparse as sp
import matplotlib.pyplot as plt
import TasmanianSG
from tqdm import tqdm
"""
System
-div(exp(K)*grad(y)) = b + u, x in D
y = g , x in D_Dir
exp(K)*grad(y)*n = 0 , x in D_Neu
Random field:
K ~ GaussianField
Cost Functional
    J(u) = 0.5*E(|y(u)-y_d|**2) + gamma/2*|u|**2
Minimize using sparse grids to estimate E
"""
def sample_cost_gradient(state,adjoint,A,M,u,y_data,gamma):
"""
Evaluate the cost functional at
"""
#
# Solve state equation
#
state.set_matrix(sp.csr_matrix(A, copy=True))
b = M.dot(u)
state.set_rhs(b)
state.solve_system()
y = state.get_solution(as_function=False)
dy = y-y_data
# Cost
f = 0.5*dy.T.dot(M.dot(dy)) + 0.5*gamma*u.T.dot(M.dot(u))
#
# Solve adjoint equation
#
adjoint.set_matrix(sp.csr_matrix(A, copy=True))
adjoint.set_rhs(M.dot(dy))
adjoint.solve_system()
p = adjoint.get_solution(as_function=False)
# Gradient
g = M.dot(p+u)
return f, g, y, p
# =============================================================================
# Variational Form
# =============================================================================
comment = Verbose()
#
# Mesh
#
# Computational domain
x_min = 0
x_max = 2
mesh = Mesh1D(box=[x_min, x_max], resolution=(100,))
# Mark Dirichlet Vertices
mesh.mark_region('left', lambda x: np.abs(x)<1e-9)
mesh.mark_region('right', lambda x: np.abs(x-2)<1e-9)
#
# Finite element spaces
#
Q1 = QuadFE(mesh.dim(), 'Q1')
# Dofhandler for state
dh = DofHandler(mesh, Q1)
dh.distribute_dofs()
m = dh.n_dofs()
dh.set_dof_vertices()
x = dh.get_dof_vertices()
# Basis functions
phi = Basis(dh, 'v')
phi_x = Basis(dh, 'vx')
state = LS(phi)
state.add_dirichlet_constraint('left',1)
state.add_dirichlet_constraint('right',0)
state.set_constraint_relation()
adjoint = LS(phi)
adjoint.add_dirichlet_constraint('left',0)
adjoint.add_dirichlet_constraint('right',0)
adjoint.set_constraint_relation()
# =============================================================================
# System Parameters
# =============================================================================
# Target
y_target = Nodal(f=lambda x: 3-4*(x[:,0]-1)**2, dim=1, dofhandler=dh)
y_data = y_target.data()
# Regularization parameter
#gamma = 0.00001
gamma = 1e-5
# Inital guess
u = np.zeros((m,1))
# =============================================================================
# Random Diffusion Parameter
# =============================================================================
# Initialize diffusion
q = Nodal(data=np.empty((m,1)), dofhandler=dh)
# Log-normal field
sgm = 1
# Diffusion covariance function
cov = Covariance(dh, name='gaussian', parameters={'sgm':1,'l':0.001})
# Compute KL expansion
lmd, V = la.eigh(cov.get_matrix())
i_sorted = np.argsort(lmd)[::-1]
lmd = lmd[i_sorted]
V = V[:,i_sorted]
# Determine number of KL terms
tol_KL = 1e-5
r_max = 10
total_energy = np.sum(lmd**2)
for r in range(1, r_max + 1):
lr = lmd[:r]
relative_error = 1-np.sum(lr**2)/total_energy
if relative_error < tol_KL:
break
print('Number of terms in the KL expansion:', r)
print('Relative error:', relative_error)
Vr = V[:,:r]
# =============================================================================
# Monte Carlo Sample
# =============================================================================
n_batches = 100
n_samples_per_batch = 100
n_samples = n_batches*n_samples_per_batch
f_mc = []
g_mc = []
for n_batch in tqdm(range(n_batches)):
#
# Generate random sample
#
z = np.random.normal(size=(r,n_samples_per_batch))
q_smpl = Vr.dot(np.diag(np.sqrt(lr)).dot(z))
q.set_data(q_smpl)
expq = Kernel(q, F=lambda f:np.exp(f))
plot = Plot()
plot.line(q, i_sample=np.arange(100))
#
# Assemble system
#
problems = [[Form(expq, test=phi_x, trial=phi_x)],
[Form(test=phi, trial=phi)]]
assembler = Assembler(problems, mesh)
assembler.assemble()
M = assembler.af[0]['bilinear'].get_matrix()[0]
for n in range(n_samples_per_batch):
A = assembler.af[0]['bilinear'].get_matrix()[n]
fn, gn, yn, pn = sample_cost_gradient(state,adjoint,A,M,u,y_data,gamma)
f_mc.append(fn)
g_mc.append(gn)
f_mc = np.concatenate(f_mc, axis=1)
g_mc = np.concatenate(g_mc, axis=1)
np.save('f_mc',f_mc)
np.save('g_mc',g_mc)
# =============================================================================
# Sparse grid sample
# =============================================================================
tasmanian_library="/home/hans-werner/bin/TASMANIAN-6.0/libtasmaniansparsegrid.so"
f_grid = TasmanianSG.TasmanianSparseGrid(tasmanian_library=tasmanian_library)
g_grid = TasmanianSG.TasmanianSparseGrid(tasmanian_library=tasmanian_library)
n_levels = 5
for level in range(n_levels):
f_grid.makeGlobalGrid(r,1,level,'level','gauss-hermite')
g_grid.makeGlobalGrid(r,m,level,'level','gauss-hermite')
z = | np.sqrt(2) | numpy.sqrt |
# -*- coding: utf-8 -*-
from screws.freeze.main import FrozenOnly
import numpy as np
class ___3dCSCG_1Form_Vortex_Detection___(FrozenOnly):
"""A wrapper of all vortex detection methods.
So, we consider this 1 form as a variable of a flow field.
"""
def __init__(self, _1sf):
self._sf_ = _1sf
self._freeze_self_()
def ___PRIVATE_generate_gradient_tensor_at___(self, xi, eta, sigma):
"""We compute the gradient tensor of this 1form.
To do so, we first project this 1-form into a vector of 3 standard 0-forms which represent
the three components. Then we do the gradient (apply the incidence matrix E10) to each
standard 0-form.
It returns a 3 by 3 tensor representing
((du_dx, du_dy, du_dz),
(dv_dx, dv_dy, dv_dz),
(dw_dx, dw_dy, dw_dz)).
Each value are 3d evaluated at *meshgrid(xi, eta, sigma, indexing='ij)
:param xi: 1d increasing array in [-1,1].
:param eta: 1d increasing array in [-1,1].
:param sigma: 1d increasing array in [-1,1].
"""
assert np.ndim(xi) == 1 and np.all(np.diff(xi) >0) and np.max(xi) <= 1 and np.min(xi) >= -1, \
f"xi={xi} wrong, should be 1d array in [-1,1] and increasing."
        assert np.ndim(eta) == 1 and np.all(np.diff(eta) >0) and np.max(eta) <= 1 and np.min(eta) >= -1, \
            f"eta={eta} wrong, should be 1d array in [-1,1] and increasing."
import numpy as np
from scipy import interpolate
from stochastic.filWin import FilterWindows2D
def nextpow2(n):
    # MATLAB-style next power of two: smallest integer exponent m with 2**m >= n
    m_f = np.log2(n)
    return int(np.ceil(m_f))
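# usage sketch: nextpow2(5) -> 3, since 2**3 = 8 is the smallest power of two >= 5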
'''
defines the data stream classes as well as important
functions for general pattern analysis
'''
import numpy as np
import scipy.signal
import random
import math
import itertools
import copy
# # # ------ CLASS INDEPENDENT FUNCTIONS --------------------------------------
def de2bis(data, B):
'''
performs decimal to bin conversion for unsigned
and signed numbers (automatically done)
'''
if ( # Check if it fits into the specified bit width
(min(data) < 0 and max(data) >= (1 << (B-1))) or
(max(data) >= (1 << B)) or (min(data) < -(1 << (B-1)))):
raise ValueError('de2bi: Bit width %i is to small' % B)
else:
return [np.binary_repr(x, width=B) for x in data]
def uniform_data(N, B, ro=0):
'''
    generates N fully random (uniform distributed) B-bit patterns
    (signed) with a sample correlation of ro
'''
N = int(N)
data = np.random.uniform(-(1 << B-1), (1 << B-1)-1, N) # random signal
data = data - np.mean(data)
if ro != 0:
y = math.sqrt(1-ro**2)*scipy.signal.lfilter([1, 0], [1, -ro], data)
y = y - np.mean(y) # remove mean
else:
y = data
data = np.clip(np.round(y), -(1 << B-1), (1 << B-1)-1)
return np.array(data).astype(int)
def gaussian_data(N, B, log2_std, mu, ro=0, is_signed=True):
'''
generates N gaussian distributed B-bit patterns
    with a standard deviation (sigma) of 2**log2_std (given in bits/log2),
a mean of mu, and a rel. pattern correlation of ro
OPTIONAL:
is_signed = if true according bit vectors are unsigned
DEFAULT: True
'''
N = int(N)
x = np.random.normal(0, 1, N) # mean free gaussian signal
x = x - np.mean(x)
if ro != 0: # corr
y = math.sqrt(1-ro**2)*scipy.signal.lfilter([1, 0], [1, -ro], x)
y = y - np.mean(y)
else:
y = x
    y = y*(2**log2_std) + mu  # add mean and standard deviation
if is_signed:
data = np.clip(np.round(y), -(1 << (B-1)), (1 << (B-1))-1)
else:
data = np.clip(np.round(y), 0, (1 << B)-1)
return data.astype(int)
def lognormal_data(N, B, log2_std, mu, ro=0, is_signed=True):
'''
generates N lognornmal distributed B-bit patterns
with a std derivation (sigma) of 2**std (std given in bits/log2),
a mean of mu, and a rel. pattern correlation of ro
OPTIONAL:
is_signed = if true according bit vectors are unsigned
DEFAULT: True
'''
N = int(N)
x = np.random.lognormal(0, 1, N) # mean free lognormal signal
x = x - np.mean(x)
if ro != 0: # corr
y = math.sqrt(1-ro**2)*scipy.signal.lfilter([1, 0], [1, -ro], x)
y = y - np.mean(y)
else:
y = x
    y = y*(2**log2_std) + mu  # add mean and standard deviation
if is_signed:
data = np.clip(np.round(y), -(1 << (B-1)), (1 << (B-1))-1)
else:
data = np.clip(np.round(y), 0, (1 << B)-1)
return data.astype(int)
def autocorr(data, k=1):
'''
calculates the k-th value of the autocorrelation coefficients
of the data stream
'''
data_mf = np.array(data) - np.mean(data) # remove mean
y = data_mf[k:]*data_mf[:len(data_mf)-k]
autocorr = y.mean()/data_mf.var()
return autocorr
def corr(data1, data2, k=0):
'''
calculates the k-th value of the correlation coeffs of the two data streams
(!TESTED!)
'''
n = min(len(data1), len(data2))
data1_mf = np.array(data1)-np.mean(np.array(data1)) # remove mean
data2_mf = np.array(data2)-np.mean(np.array(data2))
y = data1_mf[k:n]*data2_mf[:n-k]
corr = y.mean()/(data1_mf.std()*data2_mf.std())
return corr
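# usage sketch tying the generators to the estimators above:
# x = uniform_data(1e5, B=8, ro=0.5)
# print(autocorr(x, k=1))  # approximately 0.5 for large N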
# # # ------ CLASS FOR DATA STREAM --------------------------------------------
class DataStream():
'''
class for concrete data_stream.
TWO POSSIBLE INSTANTIATIONS:
# 1 data_stream_module.data_stream( ...)
# 2 data_stream_module.data_stream.from_Stoch( ...)
(used if samples/samples of the data_stream are not known,
but the stochastic properties --> see according docstring)
VARS FOR #1: samples = (int) samples of the data stream
B = bit width
fit_to_B = if the specified samples can not be presented by
B-bit, the LSBs are removed until it fits
is_signed = signed binary repr ?
id = identifier string
'''
version = "0.1" # class version
def __init__(self, samples, B=8, fit_to_B=False, is_signed=False, id=None):
samples = | np.array(samples) | numpy.array |
"""
Module containing functions which plot univariate histograms
(`distpy.util.TrianglePlot.univariate_histogram`), bivariate histograms
(`distpy.util.TrianglePlot.bivariate_histogram`), and triangle plots
(`distpy.util.TrianglePlot.triangle_plot`), which are really just combinations
of the previous two types.
**File**: $DISTPY/distpy/util/TrianglePlot.py
**Author**: <NAME>
**Date**: 15 May 2021
"""
from __future__ import division
import numpy as np
import scipy.linalg as scila
from .TypeCategories import real_numerical_types, sequence_types
try:
import matplotlib.pyplot as pl
from matplotlib.ticker import StrMethodFormatter
except:
have_matplotlib = False
else:
have_matplotlib = True
no_matplotlib_error = ImportError("matplotlib cannot be imported.")
def univariate_histogram(sample, reference_value=None, bins=None,\
matplotlib_function='fill_between', show_intervals=False, xlabel='',\
ylabel='', title='', fontsize=28, ax=None, show=False, norm_by_max=True,\
**kwargs):
"""
Plots a 1D histogram of the given sample.
Parameters
----------
sample : sequence
the 1D sample of which to take a histogram
reference_value : real number or None
if given, a point at which to plot a dashed reference line
bins : int, sequence, or None
bins to pass to `numpy.histogram` function
matplotlib_function : str
either 'fill_between', 'bar', or 'plot'
show_intervals : bool
if True, 95% confidence intervals are plotted
xlabel : str
the string to use in labeling x axis
ylabel : str
the string to use in labeling y axis
title : str
title string with which to top plot
fontsize : int, str, or None
integer size in points or one of ['xx-small', 'x-small', 'small',
'medium', 'large', 'x-large', 'xx-large'] representing size of labels
ax : matplotlib.Axes or None
- if None, new Figure and Axes are created
- otherwise, this Axes object is plotted on
show : bool
if True, `matplotlib.pyplot.show` is called before this function
returns
norm_by_max : bool
if True, normalization is such that maximum of histogram values is 1.
kwargs : dict
keyword arguments to pass on to `matplotlib.Axes.plot` or
`matplotlib.Axes.fill_between`
Returns
-------
axes : matplotlib.Axes or None
- if `show` is True, None is returned
- otherwise, the Axes instance plotted on is returned
"""
if not have_matplotlib:
raise no_matplotlib_error
if type(ax) is type(None):
fig = pl.figure()
ax = fig.add_subplot(111)
(nums, bins) = np.histogram(sample, bins=bins, density=True)
bin_centers = (bins[1:] + bins[:-1]) / 2
num_bins = len(bin_centers)
if norm_by_max:
nums = nums / np.max(nums)
ylim = (0, 1.1 * np.max(nums))
if 'color' in kwargs:
color = kwargs['color']
del kwargs['color']
else:
# 95% interval color
color = 'C0'
cumulative = np.cumsum(nums)
cumulative = cumulative / cumulative[-1]
cumulative_is_less_than_025 = np.argmax(cumulative > 0.025)
cumulative_is_more_than_975 = np.argmax(cumulative > 0.975) + 1
interval_95p =\
(cumulative_is_less_than_025, cumulative_is_more_than_975 + 1)
if matplotlib_function in ['bar', 'plot']:
if matplotlib_function == 'bar':
ax.bar(bin_centers, nums,\
width=(bins[-1] - bins[0]) / num_bins, color=color, **kwargs)
else:
ax.plot(bin_centers, nums, color=color, **kwargs)
if show_intervals:
ax.plot([bins[interval_95p[0]]]*2, ylim, color='r', linestyle='--')
ax.plot([bins[interval_95p[1]]]*2, ylim, color='r', linestyle='--')
elif matplotlib_function == 'fill_between':
if show_intervals:
ax.plot(bin_centers, nums, color='k', linewidth=1)
half_bins = np.linspace(bins[0], bins[-1], (2 * len(bins)) - 1)
interpolated_nums = np.interp(half_bins, bin_centers, nums)
ax.fill_between(\
half_bins[2*interval_95p[0]:2*interval_95p[1]],\
np.zeros((2 * (interval_95p[1] - interval_95p[0]),)),\
interpolated_nums[2*interval_95p[0]:2*interval_95p[1]],\
color=color)
ax.fill_between(bin_centers, nums,\
np.ones_like(nums) * 1.5 * np.max(nums), color='w')
else:
ax.fill_between(bin_centers, np.zeros_like(nums), nums,\
color=color, **kwargs)
else:
raise ValueError("matplotlib_function not recognized.")
ax.set_ylim(ylim)
if type(reference_value) is not type(None):
ax.plot([reference_value] * 2, ylim, color='r', linewidth=1,\
linestyle='--')
ax.set_ylim(ylim)
ax.set_xlim((bins[0], bins[-1]))
ax.set_xlabel(xlabel, size=fontsize)
ax.set_ylabel(ylabel, size=fontsize)
ax.set_title(title, size=fontsize)
ax.tick_params(width=2, length=6, labelsize=fontsize)
if show:
pl.show()
else:
return ax
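# Usage sketch (assumes matplotlib is importable):
# sample = np.random.normal(size=10000)
# univariate_histogram(sample, reference_value=0., bins=50, xlabel='$x$', show=True)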
def confidence_contour_2D(xsample, ysample, nums=None,\
confidence_contours=0.95, hist_kwargs={}):
"""
Finds the posterior distribution levels which represent the boundaries of
confidence intervals of the given confidence level(s) in two dimensions.
Parameters
----------
xsample : sequence
1D sample corresponding to variable on x-axis
ysample : sequence
1D sample corresponding to variable on y-axis
nums : numpy.ndarray or None
if histogram has already been created, the histogram values can be
passed here as a 2D numpy.ndarray.
if nums is None, `numpy.histogram2d` is called in this function
confidence_contours : number or sequence of numbers
confidence level as a number between 0 and 1 or a 1D array of such
numbers
hist_kwargs : dict
keyword arguments to pass to `numpy.histogram2d` function (only used
if `nums` is None)
Returns
-------
contour_boundaries: sequence
1D sequence of boundaries of contours corresponding to given confidence
level(s)
"""
if type(nums) is type(None):
(nums, xedges, yedges) =\
np.histogram2d(xsample, ysample, **hist_kwargs)
nums = np.sort(nums.flatten())
cdf_values = np.cumsum(nums)
cdf_values = (cdf_values / cdf_values[-1])
confidence_levels = 1 - cdf_values
if type(confidence_contours) in real_numerical_types:
confidence_contours = [confidence_contours]
if type(confidence_contours) in sequence_types:
confidence_contours = np.sort(confidence_contours)
return np.where(np.all(confidence_levels[np.newaxis,:] <=\
confidence_contours[:,np.newaxis], axis=-1), nums[0],\
np.interp(confidence_contours, confidence_levels[-1::-1],\
nums[-1::-1]))
else:
raise TypeError("confidence_contours was set to neither a single " +\
"number or a 1D sequence of numbers.")
def bivariate_histogram(xsample, ysample, reference_value_mean=None,\
reference_value_covariance=None, bins=None, matplotlib_function='imshow',\
xlabel='', ylabel='', title='', fontsize=28, ax=None, show=False,\
contour_confidence_levels=0.95, reference_color='r', reference_alpha=1,\
minima=None, maxima=None, num_ellipse_points=1000,\
xs_for_reference_lines=None, ys_for_reference_lines=None, **kwargs):
"""
Plots a 2D histogram of the given joint sample.
Parameters
----------
xsample : sequence
1D sample corresponding to variable on x-axis
ysample : sequence
1D sample corresponding to variable on y-axis
reference_value_mean : sequence or None
- if None, no reference line is plotted
- otherwise, sequence of two elements representing the reference value
for x- and y-samples. Each element can be either None (if no reference
line should be plotted) or a value at which to plot a reference line.
reference_value_covariance: numpy.ndarray or None
- if `numpy.ndarray`, represents the covariance matrix used to generate
a reference ellipse around the reference mean.
- if None or if one or more of `reference_value_mean` is None, no
ellipse is plotted
bins : int, sequence, or None
bins to pass to `numpy.histogram2d`
matplotlib_function : str
function to use in plotting. One of ['imshow', 'contour', 'contourf'].
xlabel : str
the string to use in labeling x axis
ylabel : str
the string to use in labeling y axis
title : str
title string with which to top plot
fontsize : int, str, or None
integer size in points or one of ['xx-small', 'x-small', 'small',
'medium', 'large', 'x-large', 'xx-large'] representing size of labels
ax : matplotlib.Axes or None
- if None, new Figure and Axes are created
- otherwise, this Axes object is plotted on
show : bool
if True, `matplotlib.pyplot.show` is called before this function
returns
contour_confidence_levels : number or sequence of numbers
confidence level as a number between 0 and 1 or a 1D array of such
numbers. Only used if `matplotlib_function` is `'contour'` or
`'contourf'` or if `reference_value_mean` and
`reference_value_covariance` are both not None
minima : sequence
sequence of the form `(min_X, min_Y)` to take into account when
plotting ellipses (only used if `reference_value_covariance` is not
None)
maxima : sequence
sequence of the form `(max_X, max_Y)` to take into account when
plotting ellipses (only used if `reference_value_covariance` is not
None)
kwargs : dict
keyword arguments to pass on to `matplotlib.Axes.imshow` (any but
'origin', 'extent', or 'aspect') or `matplotlib.Axes.contour` or
`matplotlib.Axes.contourf` (any)
Returns
-------
axes : matplotlib.Axes or None
- if `show` is True, None is returned
- otherwise, the Axes instance plotted on is returned
"""
if not have_matplotlib:
raise no_matplotlib_error
if type(ax) is type(None):
fig = pl.figure()
ax = fig.add_subplot(111)
(nums, xbins, ybins) = np.histogram2d(xsample, ysample, bins=bins)
if matplotlib_function == 'contour':
nums = np.concatenate([np.zeros((1, nums.shape[1])), nums,\
np.zeros((1, nums.shape[1]))], axis=0)
nums = np.concatenate([np.zeros((nums.shape[0], 1)), nums,\
np.zeros((nums.shape[0], 1))], axis=1)
xbins = np.concatenate([[(2 * xbins[0]) - xbins[1]], xbins,\
[(2 * xbins[-1]) - xbins[-2]]])
ybins = np.concatenate([[(2 * ybins[0]) - ybins[1]], ybins,\
[(2 * ybins[-1]) - ybins[-2]]])
xlim = (xbins[0], xbins[-1])
ylim = (ybins[0], ybins[-1])
xbin_centers = (xbins[1:] + xbins[:-1]) / 2
ybin_centers = (ybins[1:] + ybins[:-1]) / 2
if matplotlib_function == 'imshow':
ax.imshow(nums.T, origin='lower',\
extent=[xlim[0], xlim[1], ylim[0], ylim[1]], aspect='auto',\
**kwargs)
else:
pdf_max = np.max(nums)
contour_levels = confidence_contour_2D(xsample, ysample, nums=nums,\
confidence_contours=contour_confidence_levels)
contour_levels = | np.sort(contour_levels) | numpy.sort |
import numpy as np
import pandas as pd
def get_converging_models_option1(conc_df_interp: pd.DataFrame, n_models: int) -> list:
non_converging_models = []
for model_i in range(n_models):
last_conc_values = \
conc_df_interp[(conc_df_interp['model'] == model_i) & (conc_df_interp['time_point'].between(0.75, 1.05))][
'concentration'].values
if len(last_conc_values) == 0 or np.any( | np.abs(last_conc_values) | numpy.abs |
#FP LDOS, Data Loaders
import os, sys
import numpy as np
import timeit
#import torch.nn as nn
#import torch.nn.functional as F
import torch
import torch.multiprocessing as mp
#import torch.utils.Dataset
import torch.utils.data.distributed
import torch.utils.data
import torch.utils
import horovod.torch as hvd
sys.path.append("./src/charm/clustering")
import cluster_fingerprints
###-----------------------------------------------------------------------###
# Big Data Dataset for training data that does not fit into memory
class Big_Charm_Dataset(torch.utils.data.Dataset):
def __init__(self, args, \
input_fpaths, \
output_fpaths, \
num_samples, \
input_sample_shape, \
output_sample_shape, \
input_subset, \
output_subset, \
input_scaler_kwargs={}, \
output_scaler_kwargs={}): #, \
#do_reset=True):
# input:
## args: Argparser args
## input_fpaths: paths to input numpy files
## output_fpaths: paths to output numpy files
## num_samples: number of samples per file
## input_sample_shape: shape of input sample
## output_sample_shape: shape of output sample
## input_subset: take subset of numpy file sample to
## fit input_sample_shape
## output_subset: take subset of numpy file sample to
## fit input_sample_shape
## input_scaler_kwargs: dict of input scaler options
        ##        output_scaler_kwargs: dict of output scaler options
self.args = args
self.input_fpaths = input_fpaths
self.output_fpaths = output_fpaths
self.num_samples = num_samples
self.input_shape = np.insert(input_sample_shape, 0, num_samples)
self.output_shape = np.insert(output_sample_shape, 0, num_samples)
self.input_mask = np.zeros(input_sample_shape, dtype=bool)
self.output_mask = np.zeros(output_sample_shape, dtype=bool)
self.input_mask[input_subset] = True
self.output_mask[output_subset] = True
# self.input_subset = input_subset
# self.output_subset = output_subset
self.num_files = len(input_fpaths)
self.reset = True
if (self.num_files == 1):
self.reset = False
# print("Num files: %d" % self.num_files)
if (self.num_files == 0):
raise ValueError("\n\nNo files provided to the Big Charm Dataset. Exiting.\n\n")
if (self.num_files != len(output_fpaths)):
raise ValueError("\nInput file list not equal in length " + \
"with Output file list. Exiting.\n\n")
tic = timeit.default_timer()
print("Input scaling.")
self.input_scaler = Big_Data_Scaler(input_fpaths, \
num_samples, \
input_sample_shape, \
input_subset, \
**input_scaler_kwargs)
toc = timeit.default_timer()
self.is_input_scaled = not self.input_scaler.no_scaling
print("Input Scaler Timing: %4.4f" % (toc - tic))
hvd.allreduce(torch.tensor(0), name="barrier")
tic = timeit.default_timer()
print("Output scaling.")
self.output_scaler = Big_Data_Scaler(output_fpaths, \
num_samples, \
output_sample_shape, \
output_subset, \
**output_scaler_kwargs)
toc = timeit.default_timer()
self.is_output_scaled = not self.output_scaler.no_scaling
print("Output Scaler Timing: %4.4f" % (toc - tic))
hvd.allreduce(torch.tensor(0), name="barrier")
if (hvd.rank() == 0):
print("Input FP Factors")
self.input_scaler.print_factors()
print("Output LDOS Factors")
self.output_scaler.print_factors()
hvd.allreduce(torch.tensor(0), name="barrier")
# print("\n\nDone.\n\n")
# exit(0);
# List of numpy arrays to preserve mmap_mode
# self.input_datasets = []
# self.output_datasets = []
# Load Datasets
# for idx, path in enumerate(input_fpaths):
# print("Input: %d" % idx)
# self.input_datasets.append(np.load(path, mmap_mode=mmap_mode))
# hvd.allreduce(torch.tensor(0), name="barrier")
# for idx, path in enumerate(output_fpaths):
# print("Output: %d" % idx)
# self.output_datasets.append(np.load(path, mmap_mode=mmap_mode))
# hvd.allreduce(torch.tensor(0), name="barrier")
# Input subset and reshape
# for i in range(self.num_files):
# self.input_datasets[i] = np.reshape(self.input_datasets[i], \
# self.input_shape)
# if (input_subset is not None):
# self.input_datasets[i] = self.input_datasets[i][:, input_subset]
# Output subset and reshape
# for i in range(self.num_files):
# self.output_datasets[i] = np.reshape(self.output_datasets[i], \
# self.output_shape)
# if (output_subset is not None):
# self.output_datasets[i] = self.output_datasets[i][:, output_subset]
self.file_idxs = np.random.permutation(np.arange(self.num_files))
self.current_file = 0
self.current_sample = 0
self.barrier = mp.Barrier(self.args.num_data_workers)
# self.lock = torch.multiprocessing.Lock()
# Set the starting dataset
self.input_dataset = None
self.output_dataset = None
self.reset_dataset()
# self.lock.acquire()
# self.lock.release()
def set_scalers(self, input_scaler, output_scaler):
self.input_scaler = input_scaler
self.output_scaler = output_scaler
if (not self.is_input_scaled):
self.input_dataset = self.input_scaler.do_scaling_sample(self.input_dataset)
self.is_input_scaled = True
else:
raise ValueError("\n\nBig Clustered Dataset INPUT already scaled. Exiting.\n\n")
if (not self.is_output_scaled):
self.output_dataset = self.output_scaler.do_scaling_sample(self.output_dataset)
self.is_output_scaled = True
else:
raise ValueError("\n\nBig Clustered Dataset OUTPUT already scaled. Exiting.\n\n")
def reset_dataset(self):
# Clean out memory, because mmap brings those values into memory
# del self.input_datasets
# del self.output_datasets
# self.input_datasets = []
# self.output_datasets = []
# Load Datasets
# for idx, path in enumerate(input_fpaths):
# self.input_datasets[i] = np.load(path, mmap_mode=mmap_mode)
# for idx, path in enumerate(output_fpaths):
# self.output_datasets[i] = np.load(path, mmap_mode=mmap_mode)
# Input/Output reshape
# for i in range(self.num_files):
# self.input_datasets[i] = np.reshape(self.input_datasets[i], \
# self.input_shape)
# self.output_datasets[i] = np.reshape(self.output_datasets[i], \
# self.output_shape)
# print("Rank: %d, Reset dataset %d of %d for all workers. Current_sample: %d" % \
# (hvd.rank(), self.current_file + 1, self.num_files, self.current_sample))
# print("Rank: %d, Parent PID: %d, Current PID: %d" % \
# (hvd.rank(), os.getppid(), os.getpid()))
# Lock threads for data reset
# self.lock.acquire();
# print("Rank: %d, Reset dataset %d of %d for mp-locked workers." % \
# (hvd.rank(), self.current_file + 1, self.num_files))
if (self.current_file == self.num_files):
self.file_idxs = np.random.permutation(np.arange(self.num_files))
self.current_file = 0
del self.input_dataset
del self.output_dataset
# Load file into memory
self.input_dataset = np.load(self.input_fpaths[self.file_idxs[self.current_file]])
self.output_dataset = np.load(self.output_fpaths[self.file_idxs[self.current_file]])
# Reshape data
self.input_dataset = np.reshape(self.input_dataset, \
self.input_shape)
self.output_dataset = np.reshape(self.output_dataset, \
self.output_shape)
# Subset data
self.input_dataset = self.input_dataset[:, self.input_mask]
self.output_dataset = self.output_dataset[:, self.output_mask]
# Scale data
self.input_dataset = self.input_scaler.do_scaling_sample(self.input_dataset)
self.output_dataset = self.output_scaler.do_scaling_sample(self.output_dataset)
# print("Input fp valuee:", self.input_dataset[3+92+113, :])
# self.mutex = mp.Semaphore(1)
# self.barrier_sema = mp.Semaphore(0)
# self.barrier = mp.Barrier(self.args.num_data_workers)
# self.current_file += 1
# self.current_sample = 0
# self.lock.release()
# Fetch a sample
def __getitem__(self, idx):
# idx to vector location
# file_idx = idx // self.num_samples
# sample_idx = idx % self.num_samples
# read data
# sample_input = self.input_datasets[file_idx][sample_idx]
# sample_output = self.output_datasets[file_idx][sample_idx]
# subset and scale data
# scaled_input = self.input_scaler.do_scaling_sample(sample_input[self.input_subset])
# scaled_output = self.output_scaler.do_scaling_sample(sample_output[self.output_subset])
# create torch tensor
# input_tensor = torch.tensor(scaled_input, dtype=torch.float32)
# output_tensor = torch.tensor(scaled_output, dtype=torch.float32)
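# All data-loader workers rendezvous at the barrier below once each has
# consumed its shard of the current file. mp.Barrier.wait() returns a unique
# index per waiting worker; the worker that receives 0 reloads the next file
# while the others block on the second wait().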
if (self.reset and self.current_sample >= (self.num_samples / hvd.size())):
#self.mp_complete = mp.Value('i', False, lock=False)
#self.lock.acquire()
# print("Rank %d, Before PID: %d" % (hvd.rank(), os.getpid()))
pid = self.barrier.wait()
print("Rank %d, Reset PID: %d" % (hvd.rank(), pid))
self.current_file += 1
#if (not self.mp_complete.value):
if (pid == 0):
print("Rank: %d, Entering reset datset on PID %d" % (hvd.rank(), pid))
self.reset_dataset()
#self.current_file += 1
self.current_sample = 0
print("Rank: %d, PID %d waiting or Done" % (hvd.rank(), pid))
self.barrier.wait()
# print("Rank: %d, Current_file Before: %d" % (hvd.rank(), self.current_file))
# self.mp_complete.value = True
# self.lock.release()
# self.barrier.acquire()
# self.barrier.release()
# print("Rank: %d, Current_file After: %d" % (hvd.rank(), self.current_file))
self.current_sample += self.args.num_data_workers
sample_idx = idx % self.num_samples
# if (self.current_sample % 1000 == 0):
# print("CurrSample: %d, SampleIDX: %d" % (self.current_sample, sample_idx))
input_tensor = torch.tensor(self.input_dataset[sample_idx, :], dtype=torch.float32)
output_tensor = torch.tensor(self.output_dataset[sample_idx, :], dtype=torch.float32)
return input_tensor, output_tensor
# Number of samples in dataset
def __len__(self):
return self.num_files * self.num_samples
###-----------------------------------------------------------------------###
# Big Data Dataset for training data that does not fit into memory
class Big_Charm_Clustered_Dataset(torch.utils.data.Dataset):
def __init__(self, args, \
input_fpaths, \
output_fpaths, \
num_samples, \
input_sample_shape, \
output_sample_shape, \
input_subset, \
output_subset, \
input_scaler_kwargs={}, \
output_scaler_kwargs={}): #, \
#do_reset=True):
# input:
## args: Argparser args
## input_fpaths: paths to input numpy files
## output_fpaths: paths to output numpy files
## num_samples: number of samples per file
## input_sample_shape: shape of input sample
## output_sample_shape: shape of output sample
## input_subset: take subset of numpy file sample to
## fit input_sample_shape
## output_subset: take subset of numpy file sample to
## fit output_sample_shape
## input_scaler_kwargs: dict of input scaler options
## output_scaler_kwargs: dict of output scaler options
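## Example usage (hypothetical args, paths, and shapes; a minimal sketch):
## ds = Big_Charm_Clustered_Dataset(args,
## ["fp_snapshot0.npy"], ["ldos_snapshot0.npy"],
## num_samples=200**3,
## input_sample_shape=[116], output_sample_shape=[250],
## input_subset=np.arange(91), output_subset=np.arange(250))
## loader = torch.utils.data.DataLoader(ds, batch_size=args.batch_size,
## num_workers=args.num_data_workers)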
self.args = args
self.input_fpaths = input_fpaths
self.output_fpaths = output_fpaths
self.num_samples = num_samples
self.input_shape = np.insert(input_sample_shape, 0, num_samples)
self.output_shape = np.insert(output_sample_shape, 0, num_samples)
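# Boolean masks record which feature columns to keep; applying a mask to each
# newly loaded file is cheaper than carrying index lists around.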
self.input_mask = np.zeros(input_sample_shape, dtype=bool)
self.output_mask = np.zeros(output_sample_shape, dtype=bool)
self.input_mask[input_subset] = True
self.output_mask[output_subset] = True
self.num_files = len(input_fpaths)
# Cluster params
self.num_clusters = args.num_clusters
self.cluster_train_ratio = args.cluster_train_ratio
self.cluster_sample_ratio = args.cluster_sample_ratio
self.reset = True
if (self.num_files == 1):
self.reset = False
if (self.num_files == 0):
raise ValueError("\n\nNo files provided to the Big Charm Dataset. Exiting.\n\n")
if (self.num_files != len(output_fpaths)):
raise ValueError("\nInput file list not equal in length " + \
"with Output file list. Exiting.\n\n")
tic = timeit.default_timer()
print("Input scaling.")
self.input_scaler = Big_Data_Scaler(input_fpaths, \
num_samples, \
input_sample_shape, \
input_subset, \
**input_scaler_kwargs)
toc = timeit.default_timer()
self.is_input_scaled = not self.input_scaler.no_scaling
print("Input Scaler Timing: %4.4f" % (toc - tic))
hvd.allreduce(torch.tensor(0), name="barrier")
tic = timeit.default_timer()
print("Output scaling.")
self.output_scaler = Big_Data_Scaler(output_fpaths, \
num_samples, \
output_sample_shape, \
output_subset, \
**output_scaler_kwargs)
toc = timeit.default_timer()
self.is_output_scaled = not self.output_scaler.no_scaling
print("Output Scaler Timing: %4.4f" % (toc - tic))
hvd.allreduce(torch.tensor(0), name="barrier")
if (hvd.rank() == 0):
print("Input FP Factors")
self.input_scaler.print_factors()
print("Output LDOS Factors")
self.output_scaler.print_factors()
hvd.allreduce(torch.tensor(0), name="barrier")
self.clustered_inputs = np.zeros([self.num_files, self.num_samples])
self.samples_per_cluster = np.zeros([self.num_files, args.num_clusters])
for idx, fpath in enumerate(input_fpaths):
print("Clustering file %d: %s" % (idx, fpath))
tic = timeit.default_timer()
self.clustered_inputs[idx, :] = cluster_fingerprints.cluster_snapshot(fpath, \
self.num_samples, \
self.input_shape, \
self.input_mask, \
self.input_scaler, \
self.num_clusters, \
self.cluster_train_ratio)
toc = timeit.default_timer()
print("Clustering time %d: %4.4f" % (idx, toc - tic))
for i in range(args.num_clusters):
self.samples_per_cluster[idx, i] = np.sum(self.clustered_inputs[idx, :] == i, dtype=np.int64)
if (hvd.rank() == 0):
print("Cluster %d: %d" % (i, self.samples_per_cluster[idx, i]))
# if (self.samples_per_cluster[idx, i] == 0):
# raise ValueError("\n\nCluster %d of file %s has no samples!\n\n" % (i, fpath))
if (np.sum(self.samples_per_cluster[idx, :]) != self.num_samples):
raise ValueError("\n\nSamplers per cluster sum: %d, not equal to total num samples: %d\n\n" % (np.sum(self.samples_per_cluster[idx,:]), self.num_samples))
# print("\n\nDone\n\n")
# exit(0);
# for i in range(self.num_clusters):
# self.cluster_idxs.append(self.clustered_inputs[])
self.file_idxs = np.random.permutation(np.arange(self.num_files))
self.current_file = 0
self.current_sample = 0
self.barrier = mp.Barrier(self.args.num_data_workers)
# Set the starting dataset
self.input_dataset = None
self.output_dataset = None
self.cluster_idxs = [None] * self.num_clusters
self.sampling_idxs = [None] * self.num_clusters
self.current_sampling_idx = [None] * self.num_clusters
self.reset_dataset()
def set_scalers(self, input_scaler, output_scaler):
self.input_scaler = input_scaler
self.output_scaler = output_scaler
if (not self.is_input_scaled):
self.input_dataset = self.input_scaler.do_scaling_sample(self.input_dataset)
self.is_input_scaled = True
else:
raise ValueError("\n\nBig Clustered Dataset INPUT already scaled. Exiting.\n\n")
if (not self.is_output_scaled):
self.output_dataset = self.output_scaler.do_scaling_sample(self.output_dataset)
self.is_output_scaled = True
else:
raise ValueError("\n\nBig Clustered Dataset OUTPUT already scaled. Exiting.\n\n")
# pick a sample within some cluster
def get_clustered_idx(self, idx):
cluster = int(idx % self.num_clusters)
file_idx = self.file_idxs[self.current_file]
num_cluster_samples = self.samples_per_cluster[file_idx, cluster]
# Rejection sampling
if (num_cluster_samples == 0):
bad_iters = 0
while (num_cluster_samples == 0):
cluster = np.random.randint(self.num_clusters)
# Re-check occupancy so the loop terminates once a non-empty cluster is drawn
num_cluster_samples = self.samples_per_cluster[file_idx, cluster]
bad_iters += 1
import os
import logging
import datetime
from pathlib import Path
from collections import OrderedDict
import numpy as np
import pytest
from pandas import DataFrame
import astropy.units as u
from astropy.io import fits
from astropy.table import Table
from astropy.time import TimeDelta
import sunpy.io
import sunpy.net.attrs as a
import sunpy.timeseries
from sunpy.data.test import get_test_filepath, rootdir, test_data_filenames
from sunpy.net import Fido
from sunpy.time import parse_time
from sunpy.util import SunpyUserWarning
from sunpy.util.datatype_factory_base import NoMatchError
from sunpy.util.metadata import MetaDict
eve_filepath = get_test_filepath('EVE_L0CS_DIODES_1m_truncated.txt')
eve_many_filepath = [f for f in test_data_filenames()
if f.parents[0].relative_to(f.parents[1]).name == 'eve']
goes_filepath = get_test_filepath('go1520110607.fits')
psp_filepath = get_test_filepath('psp_fld_l2_mag_rtn_1min_20200104_v02.cdf')
swa_filepath = get_test_filepath('solo_L1_swa-pas-mom_20200706_V01.cdf')
fermi_gbm_filepath = get_test_filepath('gbm.fits')
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_concatenate_same_source():
# Test making a TimeSeries that is the concatenation of multiple files
ts_from_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE', concatenate=True)
assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
ts_from_folder = sunpy.timeseries.TimeSeries(
eve_many_filepath[0].parent, source='EVE', concatenate=True)
assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
# test the two methods get identical dataframes
assert ts_from_list == ts_from_folder
# test the frames have correct headings/keys (correct concatenation axis)
assert ts_from_list.columns == sunpy.timeseries.TimeSeries(
eve_many_filepath[0], source='EVE', concatenate=True).columns
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_concatenate_different_source():
# Test making a TimeSeries that is the concatenation of multiple files
ts_from_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE', concatenate=True)
assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
ts_from_folder = sunpy.timeseries.TimeSeries(
eve_many_filepath[0].parent, source='EVE', concatenate=True)
assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
# test the two methods get identical dataframes
assert ts_from_list == ts_from_folder
# test the frames have correct headings/keys (correct concatenation axis)
assert ts_from_list.columns == sunpy.timeseries.TimeSeries(
eve_many_filepath[0], source='EVE', concatenate=True).columns
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_list_of_ts():
# Test making a list TimeSeries from multiple files
ts_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE')
assert isinstance(ts_list, list)
for ts in ts_list:
assert isinstance(ts, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_from_glob():
# Test making a TimeSeries from a glob
ts_from_glob = sunpy.timeseries.TimeSeries(os.path.join(
rootdir, "eve", "*"), source='EVE', concatenate=True)
assert isinstance(ts_from_glob, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_from_pathlib():
# Test making a TimeSeries from a pathlib.Path
ts_from_pathlib = sunpy.timeseries.TimeSeries(Path(fermi_gbm_filepath),
source="GBMSummary")
assert isinstance(ts_from_pathlib, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)
@pytest.mark.remote_data
def test_from_url():
# This is the same PSP file we have in our test data, but accessed from a URL
url = ('https://spdf.gsfc.nasa.gov/pub/data/psp/fields/l2/mag_rtn_1min/2020/'
'psp_fld_l2_mag_rtn_1min_20200104_v02.cdf')
ts = sunpy.timeseries.TimeSeries(url)
assert isinstance(ts[0], sunpy.timeseries.GenericTimeSeries)
assert isinstance(ts[1], sunpy.timeseries.GenericTimeSeries)
def test_read_cdf():
ts_psp = sunpy.timeseries.TimeSeries(psp_filepath)
assert len(ts_psp) == 2
ts = ts_psp[0]
assert ts.columns == ['psp_fld_l2_mag_RTN_1min_0',
'psp_fld_l2_mag_RTN_1min_1',
'psp_fld_l2_mag_RTN_1min_2']
assert ts.quantity('psp_fld_l2_mag_RTN_1min_0').unit == u.nT
assert len(ts.quantity('psp_fld_l2_mag_RTN_1min_0')) == 118
ts = ts_psp[1]
assert ts.columns == ['psp_fld_l2_quality_flags']
assert ts.quantity('psp_fld_l2_quality_flags').unit == u.dimensionless_unscaled
assert len(ts.quantity('psp_fld_l2_quality_flags')) == 1440
@pytest.mark.remote_data
def test_read_cdf_empty_variable():
# This tests that:
# - A CDF file with an empty column can be read
# - Unknown unit handling works as expected
result = sunpy.net.Fido.search(a.Time('2020-01-01', '2020-01-02'),
a.cdaweb.Dataset('AC_H6_SWI'))
filename = Fido.fetch(result[0, 0])
# Temporarily reset sunpy.io.cdf registry of known unit conversions
import sunpy.io.cdf as sunpy_cdf
known_units = sunpy_cdf._known_units
sunpy_cdf._known_units = {}
with pytest.warns(SunpyUserWarning, match='Assigning dimensionless units'):
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.dimensionless_unscaled
# Put back known unit registry, and check that units are recognised
sunpy_cdf._known_units = known_units
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.cm**-3
# Reset again to check that registering units via astropy works too
sunpy_cdf._known_units = {}
u.add_enabled_units([u.def_unit('#/cm^3', represents=u.cm**-3)])
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.cm**-3
sunpy_cdf._known_units = known_units
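# (Saving and restoring the module-level registry above keeps this test
# hermetic: later tests see the original _known_units mapping.)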
def test_read_empty_cdf(caplog):
with caplog.at_level(logging.DEBUG, logger='sunpy'):
ts_empty = sunpy.timeseries.TimeSeries(swa_filepath)
assert ts_empty == []
assert "No data found in file" in caplog.text
assert "solo_L1_swa-pas-mom_20200706_V01.cdf" in caplog.text
def test_meta_from_fits_header():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24*60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
units = {'intensity': u.W/u.m**2}
data = DataFrame(intensity, index=times, columns=['intensity'])
# Use a FITS file HDU using sunpy.io
hdulist = sunpy.io.read_file(goes_filepath)
meta = hdulist[0].header
meta_md = MetaDict(OrderedDict(meta))
ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta, units)
ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md, units)
assert ts_hdu_meta == ts_md_meta
# Use a FITS file HDU using astropy.io
hdulist = fits.open(goes_filepath)
meta = hdulist[0].header
hdulist.close()
meta_md = MetaDict(sunpy.io.header.FileHeader(meta))
ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta, units)
ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md, units)
assert ts_hdu_meta == ts_md_meta
def test_generic_construction_basic():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
# Create normal TS from dataframe and check
ts_generic = sunpy.timeseries.TimeSeries(data, meta, units)
assert isinstance(ts_generic, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_generic.columns == ['intensity']
assert ts_generic.units == units
assert ts_generic.meta.metadata[0][2] == meta
# Create TS using a tuple of values
ts_tuple = sunpy.timeseries.TimeSeries(((data, meta, units),))
assert isinstance(ts_tuple, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_generic == ts_tuple
def test_generic_construction_basic_omitted_details():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
# Create TS omitting units input arguments
with pytest.warns(SunpyUserWarning, match='Unknown units for intensity'):
ts_1 = sunpy.timeseries.TimeSeries(data, meta)
assert isinstance(ts_1, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_1.columns == ['intensity']
assert ts_1.units == OrderedDict([('intensity', u.dimensionless_unscaled)])
assert ts_1.meta.metadata[0][2] == meta
ts_2 = sunpy.timeseries.TimeSeries(data, units)
assert isinstance(ts_2, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_2.columns == ['intensity']
assert ts_2.units == units
assert ts_2.meta.metadata[0][2] == MetaDict()
def test_generic_construction_basic_different_meta_types():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
tr = sunpy.time.TimeRange(times[0], times[-1])
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta_md = MetaDict({'key': 'value'})
meta_di = {'key': 'value'}
meta_od = OrderedDict({'key': 'value'})
meta_obj = sunpy.timeseries.TimeSeriesMetaData(timerange=tr, colnames=['GOES'],
meta=MetaDict({'key': 'value'}))
# Create TS using different dictionary meta types
ts_md = sunpy.timeseries.TimeSeries(data, meta_md, units)
ts_di = sunpy.timeseries.TimeSeries(data, meta_di, units)
ts_od = sunpy.timeseries.TimeSeries(data, meta_od, units)
ts_obj = sunpy.timeseries.TimeSeries(data, meta_obj, units)
assert ts_md == ts_di == ts_od == ts_obj
assert ts_md.meta.metadata[0][2] == ts_di.meta.metadata[0][2] == ts_od.meta.metadata[0][2] == ts_obj.meta.metadata[0][2]
def test_generic_construction_ts_list():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity1 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
intensity2 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity1, index=times, columns=['intensity'])
data2 = DataFrame(intensity2, index=times, columns=['intensity2'])
units = OrderedDict([('intensity', u.W/u.m**2)])
units2 = OrderedDict([('intensity2', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
meta2 = MetaDict({'key2': 'value2'})
# Create TS individually
ts_1 = sunpy.timeseries.TimeSeries(data, meta, units)
ts_2 = sunpy.timeseries.TimeSeries(data2, meta2, units2)
# Create TS list using
ts_list = sunpy.timeseries.TimeSeries(data, meta, units, data2, meta2, units2)
assert isinstance(ts_list, list)
assert len(ts_list) == 2
assert ts_list[0] == ts_1
assert ts_list[1] == ts_2
# Create TS using a tuple
ts_list2 = sunpy.timeseries.TimeSeries(((data, meta, units), (data2, meta2, units2)))
assert ts_list == ts_list2
def test_generic_construction_concatenation():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
# pylint: disable=missing-function-docstring, missing-module-docstring
import pytest
import numpy as np
from numpy.random import randint
from pyccel.epyccel import epyccel
from modules import arrays
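# Note: the `language` argument used by the tests below is a pytest fixture
# (presumably defined in the suite's conftest.py) that parametrizes each test
# over the available backends ('fortran', 'c', 'python').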
#==============================================================================
# TEST: 1D ARRAYS OF INT-32
#==============================================================================
def test_array_int32_1d_scalar_add(language):
f1 = arrays.array_int32_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_sub(language):
f1 = arrays.array_int32_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_mul(language):
f1 = arrays.array_int32_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_div(language):
f1 = arrays.array_int32_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_scalar_idiv(language):
f1 = arrays.array_int32_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add(language):
f1 = arrays.array_int32_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub(language):
f1 = arrays.array_int32_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_mul(language):
f1 = arrays.array_int32_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_idiv(language):
f1 = arrays.array_int32_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_add_augassign(language):
f1 = arrays.array_int32_1d_add_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_1d_sub_augassign(language):
f1 = arrays.array_int32_1d_sub_augassign
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [1,2,3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_1(language):
f1 = arrays.array_int_1d_initialization_1
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_2(language):
f1 = arrays.array_int_1d_initialization_2
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="Numpy sum not yet implemented for C language"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_int_1d_initialization_3(language):
f1 = arrays.array_int_1d_initialization_3
f2 = epyccel( f1 , language = language)
assert np.array_equal(f1(), f2())
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH C ORDERING
#==============================================================================
def test_array_int32_2d_C_scalar_add(language):
f1 = arrays.array_int32_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_sub(language):
f1 = arrays.array_int32_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_mul(language):
f1 = arrays.array_int32_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_scalar_idiv(language):
f1 = arrays.array_int32_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_add(language):
f1 = arrays.array_int32_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_sub(language):
f1 = arrays.array_int32_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_mul(language):
f1 = arrays.array_int32_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_idiv(language):
f1 = arrays.array_int32_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-32 WITH F ORDERING
#==============================================================================
def test_array_int32_2d_F_scalar_add(language):
f1 = arrays.array_int32_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_sub(language):
f1 = arrays.array_int32_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_mul(language):
f1 = arrays.array_int32_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = -1e9, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_scalar_idiv(language):
f1 = arrays.array_int32_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = randint(low = 1, high = 1e9, dtype = np.int32)
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_add(language):
f1 = arrays.array_int32_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_sub(language):
f1 = arrays.array_int32_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_mul(language):
f1 = arrays.array_int32_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_idiv(language):
f1 = arrays.array_int32_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 1D ARRAYS OF INT-64
#==============================================================================
def test_array_int_1d_scalar_add(language):
f1 = arrays.array_int_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_sub(language):
f1 = arrays.array_int_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_mul(language):
f1 = arrays.array_int_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_scalar_idiv(language):
f1 = arrays.array_int_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_add(language):
f1 = arrays.array_int_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_sub(language):
f1 = arrays.array_int_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_mul(language):
f1 = arrays.array_int_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_1d_idiv(language):
f1 = arrays.array_int_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3] )
x2 = np.copy(x1)
a = np.array( [1,2,3] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH C ORDERING
#==============================================================================
def test_array_int_2d_C_scalar_add(language):
f1 = arrays.array_int_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_sub(language):
f1 = arrays.array_int_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_mul(language):
f1 = arrays.array_int_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_scalar_idiv(language):
f1 = arrays.array_int_2d_C_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_add(language):
f1 = arrays.array_int_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_sub(language):
f1 = arrays.array_int_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_mul(language):
f1 = arrays.array_int_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_idiv(language):
f1 = arrays.array_int_2d_C_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]] )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_C_initialization(language):
f1 = arrays.array_int_2d_C_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int)
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 2D ARRAYS OF INT-64 WITH F ORDERING
#==============================================================================
def test_array_int_2d_F_scalar_add(language):
f1 = arrays.array_int_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_sub(language):
f1 = arrays.array_int_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_mul(language):
f1 = arrays.array_int_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_scalar_idiv(language):
f1 = arrays.array_int_2d_F_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = 5
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_add(language):
f1 = arrays.array_int_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_sub(language):
f1 = arrays.array_int_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_mul(language):
f1 = arrays.array_int_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_idiv(language):
f1 = arrays.array_int_2d_F_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int_2d_F_initialization(language):
f1 = arrays.array_int_2d_F_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=int, order='F')
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 1D ARRAYS OF REAL
#==============================================================================
def test_array_real_1d_scalar_add(language):
f1 = arrays.array_real_1d_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_sub(language):
f1 = arrays.array_real_1d_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_mul(language):
f1 = arrays.array_real_1d_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_div(language):
f1 = arrays.array_real_1d_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_scalar_idiv(language):
f1 = arrays.array_real_1d_scalar_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_add(language):
f1 = arrays.array_real_1d_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_sub(language):
f1 = arrays.array_real_1d_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_mul(language):
f1 = arrays.array_real_1d_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_div(language):
f1 = arrays.array_real_1d_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_1d_idiv(language):
f1 = arrays.array_real_1d_idiv
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [1.,2.,3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 2D ARRAYS OF REAL WITH C ORDERING
#==============================================================================
def test_array_real_2d_C_scalar_add(language):
f1 = arrays.array_real_2d_C_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_sub(language):
f1 = arrays.array_real_2d_C_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_mul(language):
f1 = arrays.array_real_2d_C_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_scalar_div(language):
f1 = arrays.array_real_2d_C_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_add(language):
f1 = arrays.array_real_2d_C_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_sub(language):
f1 = arrays.array_real_2d_C_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_mul(language):
f1 = arrays.array_real_2d_C_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_div(language):
f1 = arrays.array_real_2d_C_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_array_initialization(language):
f1 = arrays.array_real_2d_C_array_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=float )
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="array function doesn't handle list of variables. See #752"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_real_3d_C_array_initialization_1(language):
f1 = arrays.array_real_3d_C_array_initialization_1
f2 = epyccel(f1, language = language)
x = np.random.random((3,2))
y = np.random.random((3,2))
a = np.array([x,y])
x1 = np.zeros_like(a)
x2 = np.zeros_like(a)
f1(x, y, x1)
f2(x, y, x2)
assert np.array_equal(x1, x2)
def test_array_real_3d_C_array_initialization_2(language):
f1 = arrays.array_real_3d_C_array_initialization_2
f2 = epyccel(f1, language = language)
x1 = np.zeros((2,3,4))
x2 = np.zeros((2,3,4))
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="array function doesn't handle list of variables. See #752"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_real_4d_C_array_initialization(language):
f1 = arrays.array_real_4d_C_array_initialization
f2 = epyccel(f1, language = language)
x = np.random.random((3,2,4))
y = np.random.random((3,2,4))
a = np.array([x,y])
x1 = np.zeros_like(a)
x2 = np.zeros_like(a)
f1(x, y, x1)
f2(x, y, x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: 2D ARRAYS OF REAL WITH F ORDERING
#==============================================================================
def test_array_real_2d_F_scalar_add(language):
f1 = arrays.array_real_2d_F_scalar_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_scalar_sub(language):
f1 = arrays.array_real_2d_F_scalar_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_scalar_mul(language):
f1 = arrays.array_real_2d_F_scalar_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_scalar_div(language):
f1 = arrays.array_real_2d_F_scalar_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = 5.
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_add(language):
f1 = arrays.array_real_2d_F_add
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_sub(language):
f1 = arrays.array_real_2d_F_sub
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_mul(language):
f1 = arrays.array_real_2d_F_mul
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_div(language):
f1 = arrays.array_real_2d_F_div
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_array_initialization(language):
f1 = arrays.array_real_2d_F_array_initialization
f2 = epyccel(f1, language = language)
x1 = np.zeros((2, 3), dtype=float, order='F')
x2 = np.ones_like(x1)
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="array function doesn't handle list of variables. See #752"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran)
]
)
def test_array_real_3d_F_array_initialization_1(language):
f1 = arrays.array_real_3d_F_array_initialization_1
f2 = epyccel(f1, language = language)
x = np.random.random((3,2)).copy(order='F')
y = np.random.random((3,2)).copy(order='F')
a = np.array([x,y], order='F')
x1 = np.zeros_like(a)
x2 = np.zeros_like(a)
f1(x, y, x1)
f2(x, y, x2)
assert np.array_equal(x1, x2)
def test_array_real_3d_F_array_initialization_2(language):
f1 = arrays.array_real_3d_F_array_initialization_2
f2 = epyccel(f1, language = language)
x1 = np.zeros((2,3,4), order='F')
x2 = np.zeros((2,3,4), order='F')
f1(x1)
f2(x2)
assert np.array_equal(x1, x2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="array function doesn't handle list of variables. See #752"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_4d_F_array_initialization(language):
f1 = arrays.array_real_4d_F_array_initialization
f2 = epyccel(f1, language = language)
x = np.random.random((3,2,4)).copy(order='F')
y = np.random.random((3,2,4)).copy(order='F')
a = np.array([x,y], order='F')
x1 = np.zeros_like(a)
x2 = np.zeros_like(a)
f1(x, y, x1)
f2(x, y, x2)
assert np.array_equal(x1, x2)
@pytest.mark.xfail(reason='Inhomogeneous arguments due to unknown shape')
def test_array_real_4d_F_array_initialization_mixed_ordering(language):
f1 = arrays.array_real_4d_F_array_initialization_mixed_ordering
f2 = epyccel(f1, language = language)
x = np.array([[16., 17.], [18., 19.]], dtype='float', order='F')
a = np.array(([[[0., 1.], [2., 3.]],
[[4., 5.], [6., 7.]],
[[8., 9.], [10., 11.]]],
[[[12., 13.], [14., 15.]],
x,
[[20., 21.], [22., 23.]]]),
dtype='float', order='F')
x1 = np.zeros_like(a)
x2 = np.zeros_like(a)
f1(x, x1)
f2(x, x2)
assert np.array_equal(x1, x2)
#==============================================================================
# TEST: COMPLEX EXPRESSIONS IN 3D : TEST CONSTANT AND UNKNOWN SHAPES
#==============================================================================
def test_array_int32_1d_complex_3d_expr(language):
f1 = arrays.array_int32_1d_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [1,2,3], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [-1,-2,-3], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_C_complex_3d_expr(language):
f1 = arrays.array_int32_2d_C_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_2d_F_complex_3d_expr(language):
f1 = arrays.array_int32_2d_F_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
x2 = np.copy(x1)
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_int32_in_bool_out_1d_complex_3d_expr(language):
f1 = arrays.array_int32_in_bool_out_1d_complex_3d_expr
f2 = epyccel( f1 , language = language)
x = np.array( [1,2,3], dtype=np.int32 )
a = np.array( [-1,-2,-3], dtype=np.int32 )
r1 = np.empty( 3 , dtype=bool )
r2 = np.copy(r1)
f1(x, a, r1)
f2(x, a, r2)
assert np.array_equal( r1, r2 )
def test_array_int32_in_bool_out_2d_C_complex_3d_expr(language):
f1 = arrays.array_int32_in_bool_out_2d_C_complex_3d_expr
f2 = epyccel( f1 , language = language)
x = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 )
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32 )
r1 = np.empty( (2,3) , dtype=bool )
r2 = np.copy(r1)
f1(x, a, r1)
f2(x, a, r2)
assert np.array_equal( r1, r2 )
def test_array_int32_in_bool_out_2d_F_complex_3d_expr(language):
f1 = arrays.array_int32_in_bool_out_2d_F_complex_3d_expr
f2 = epyccel( f1 , language = language)
x = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' )
a = np.array( [[-1,-2,-3], [-4,-5,-6]], dtype=np.int32, order='F' )
r1 = np.empty( (2,3) , dtype=bool, order='F' )
r2 = np.copy(r1)
f1(x, a, r1)
f2(x, a, r2)
assert np.array_equal( r1, r2 )
def test_array_real_1d_complex_3d_expr(language):
f1 = arrays.array_real_1d_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [1.,2.,3.] )
x2 = np.copy(x1)
a = np.array( [-1.,-2.,-3.] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_C_complex_3d_expr(language):
f1 = arrays.array_real_2d_C_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
def test_array_real_2d_F_complex_3d_expr(language):
f1 = arrays.array_real_2d_F_complex_3d_expr
f2 = epyccel( f1 , language = language)
x1 = np.array( [[ 1., 2., 3.], [4.,5.,6.]], order='F' )
x2 = np.copy(x1)
a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' )
f1(x1, a)
f2(x2, a)
assert np.array_equal( x1, x2 )
#==============================================================================
# TEST: 1D Stack ARRAYS OF REAL
#==============================================================================
def test_array_real_sum_stack_array(language):
f1 = arrays.array_real_1d_sum_stack_array
f2 = epyccel( f1 , language = language)
x1 = f1()
x2 = f2()
assert np.equal( x1, x2 )
def test_array_real_div_stack_array(language):
f1 = arrays.array_real_1d_div_stack_array
f2 = epyccel( f1 , language = language)
x1 = f1()
x2 = f2()
assert np.equal( x1, x2 )
def test_multiple_stack_array_1(language):
f1 = arrays.multiple_stack_array_1
f2 = epyccel(f1, language = language)
assert np.equal(f1(), f2())
def test_multiple_stack_array_2(language):
f1 = arrays.multiple_stack_array_2
f2 = epyccel(f1, language = language)
assert np.equal(f1(), f2())
#==============================================================================
# TEST: 2D Stack ARRAYS OF REAL
#==============================================================================
def test_array_real_sum_2d_stack_array(language):
f1 = arrays.array_real_2d_sum_stack_array
f2 = epyccel( f1 , language = language)
x1 = f1()
x2 = f2()
assert np.equal( x1, x2 )
def test_array_real_div_2d_stack_array(language):
f1 = arrays.array_real_2d_div_stack_array
f2 = epyccel( f1 , language = language)
x1 = f1()
x2 = f2()
assert np.equal( x1, x2 )
def test_multiple_2d_stack_array_1(language):
f1 = arrays.multiple_2d_stack_array_1
f2 = epyccel(f1, language = language)
assert np.equal(f1(), f2())
def test_multiple_2d_stack_array_2(language):
f1 = arrays.multiple_2d_stack_array_2
f2 = epyccel(f1, language = language)
assert np.equal(f1(), f2())
#==============================================================================
# TEST: Product and matrix multiplication
#==============================================================================
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="prod not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_1d_1d_prod(language):
f1 = arrays.array_real_1d_1d_prod
f2 = epyccel( f1 , language = language)
x1 = np.array([3.0, 2.0, 1.0])
x2 = np.copy(x1)
y1 = np.empty(3)
y2 = np.empty(3)
f1(x1, y1)
f2(x2, y2)
assert np.array_equal(y1, y2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_1d_matmul(language):
f1 = arrays.array_real_2d_1d_matmul
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2])
A1[1,0] = 2
A2 = np.copy(A1)
x1 = np.ones([2])
x2 = np.copy(x1)
y1 = np.empty([3])
y2 = np.empty([3])
f1(A1, x1, y1)
f2(A2, x2, y2)
assert np.array_equal(y1, y2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_1d_matmul_order_F_F(language):
f1 = arrays.array_real_2d_1d_matmul_order_F
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2], order='F')
A1[1,0] = 2
A2 = np.copy(A1)
x1 = np.ones([2])
x2 = np.copy(x1)
y1 = np.empty([3])
y2 = np.empty([3])
f1(A1, x1, y1)
f2(A2, x2, y2)
assert np.array_equal(y1, y2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_2d_matmul(language):
f1 = arrays.array_real_2d_2d_matmul
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2])
A1[1, 0] = 2
A2 = np.copy(A1)
B1 = np.ones([2, 3])
B2 = np.copy(B1)
C1 = np.empty([3,3])
C2 = np.empty([3,3])
f1(A1, B1, C1)
f2(A2, B2, C2)
assert np.array_equal(C1, C2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_2d_matmul_F_F_F_F(language):
f1 = arrays.array_real_2d_2d_matmul_F_F
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2], order='F')
A1[1, 0] = 2
A2 = np.copy(A1)
B1 = np.ones([2, 3], order='F')
B2 = np.copy(B1)
C1 = np.empty([3,3], order='F')
C2 = np.empty([3,3], order='F')
f1(A1, B1, C1)
f2(A2, B2, C2)
assert np.array_equal(C1, C2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = [
pytest.mark.fortran,
pytest.mark.skip(reason="Should fail as long as mixed order not supported, see #244")
]),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_2d_matmul_mixorder(language):
f1 = arrays.array_real_2d_2d_matmul_mixorder
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2])
A1[1, 0] = 2
A2 = np.copy(A1)
B1 = np.ones([2, 3], order = 'F')
B2 = np.copy(B1)
C1 = np.empty([3,3])
C2 = np.empty([3,3])
f1(A1, B1, C1)
f2(A2, B2, C2)
assert np.array_equal(C1, C2)
@pytest.mark.parametrize( 'language', [
pytest.param("c", marks = [
pytest.mark.skip(reason="matmul not implemented in c"),
pytest.mark.c]),
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("python", marks = pytest.mark.python)
]
)
def test_array_real_2d_2d_matmul_operator(language):
f1 = arrays.array_real_2d_2d_matmul_operator
f2 = epyccel( f1 , language = language)
A1 = np.ones([3, 2])
A1[1, 0] = 2
A2 = np.copy(A1)
B1 = np.ones([2, 3])
B2 = np.copy(B1)
C1 = np.empty([3,3])
C2 = np.empty([3,3])
f1(A1, B1, C1)
f2(A2, B2, C2)
assert np.array_equal(C1, C2)
def test_array_real_loopdiff(language):
f1 = arrays.array_real_loopdiff
f2 = epyccel( f1 , language = language)
x1 = np.ones(5)
y1 = np.zeros(5)
x2 = | np.copy(x1) | numpy.copy |
#!/usr/bin/env python
# Copyright (c) 2018-2019, <NAME>
# Copyright (c) 2012-2019, Department of Otolaryngology,
# Graduate School of Medicine, Kyoto University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os, platform, sys, argparse, pandas, numpy, itertools, tifffile
from taniclass import gaussian8, spotmarker
from PIL import Image, ImageDraw, ImageFont
# prepare tracing library
tracer = gaussian8.Gaussian8()
marker = spotmarker.SpotMarker()
# defaults
input_filename = None
output_filename = None
use_plane = 0
laplaces = [tracer.laplace]
min_distances = [tracer.min_distance]
threshold_abses = [tracer.threshold_abs]
if platform.system() == "Windows":
font_file = 'C:/Windows/Fonts/Arial.ttf'
elif platform.system() == "Linux":
font_file = '/usr/share/fonts/dejavu/DejaVuSans.ttf'
elif platform.system() == "Darwin":
font_file = '/Library/Fonts/Verdana.ttf'
else:
raise Exception('font file error.')
font_size = 20
font_color = 'white'
# parse arguments
parser = argparse.ArgumentParser(description='Try detecting fluorescent spots while changing detection parameters', \
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--output-file', nargs=1, default=output_filename, \
help='output multipage TIFF file ([basename]_fit.tif if not specified)')
parser.add_argument('-p', '--use-plane', nargs=1, type=int, default=[use_plane], \
help='frame to detect spots (the first frame if not specified)')
parser.add_argument('-x', '--max-diameter', nargs=1, type=float, default=[tracer.max_diameter], \
help='limit the maximum diameter of spots (to avoid abnormal fitting)')
parser.add_argument('-z', '--marker-size', nargs=1, type=int, default=[marker.marker_size], \
help='marker size to draw detected spots')
parser.add_argument('-c', '--marker-colors', nargs=2, type=str, \
metavar = ('NORMAL', 'REDUNDANT'), \
default=[marker.marker_colors[0], marker.marker_colors[3]],
help='marker colors to draw spots detected normally and redundantly')
parser.add_argument('-i', '--invert-image', action='store_true', default=marker.invert_image, \
help='invert the LUT of output image')
group = parser.add_mutually_exclusive_group()
group.add_argument('-m', '--min-distance', nargs=1, type=int, default=min_distances, \
help='pixel area to find local max (usually use default)')
group.add_argument('-M', '--min-distance-range', nargs=3, type=int,\
metavar=('BEGIN', 'END', 'STEP'), \
help='range of "min distance" to try (specify by integers)')
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--laplace', nargs=1, type=float, default=laplaces, \
help='sigma of LoG filter (try near the pixel diameter of spots)')
group.add_argument('-L', '--laplace-range', nargs=3, type=float,\
metavar=('BEGIN', 'END', 'STEP'), \
help='range of "laplace" to try (specify by floats)')
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--threshold-abs', nargs=1, type=float, default=threshold_abses, \
help='threshold of Gaussian fitting')
group.add_argument('-T', '--threshold-abs-range', nargs=3, type=float, \
metavar=('BEGIN', 'END', 'STEP'), \
                    help='range of "threshold abs" to try (specify by floats)')
parser.add_argument('input_file', nargs=1, default=input_filename, \
help='input (multipage) TIFF file')
args = parser.parse_args()
# set arguments
input_filename = args.input_file[0]
use_plane = args.use_plane[0]
marker.marker_colors = [args.marker_colors[0] for i in range(3)] + [args.marker_colors[1]]
marker.marker_size = args.marker_size[0]
marker.invert_image = args.invert_image
tracer.max_diameter = args.max_diameter[0]
if args.output_file is None:
output_filename = os.path.splitext(os.path.basename(input_filename))[0] + '_fit.tif'
if input_filename == output_filename:
raise Exception('input_filename == output_filename')
else:
output_filename = args.output_file[0]
# set ranged arguments
min_distances = args.min_distance
if args.min_distance_range is not None:
min_distances = | numpy.arange(*args.min_distance_range) | numpy.arange |
# Import necessary packages here
import os
import sys
import platform
import numpy as np
import pandas as pd
sys.path.insert(0, os.path.abspath('../core_utilities'))
from core_utilities.plotting import MatPlotDataFrame
# ================================================================================
# ================================================================================
# Date: Month Day, Year
# Purpose: Describe the types of testing to occur in this file.
# Instruction: This code can be run in the following ways
#              - pytest # runs all functions beginning with the word test in the
#                directory
#              - pytest file_name.py # Runs all functions in file_name beginning
#                with the word test
#              - pytest file_name.py::test_func_name # Runs only the function
#                                                      titled test_func_name in
#                                                      the file_name.py file
#              - pytest -s # Runs tests and displays when a specific file
#                has completed testing, and what functions failed.
#                Also displays print statements
# - pytest -v # Displays test results on a function by function basis
# - pytest -p no:warnings # Runs tests and does not display warning
# messages
# - pytest -s -v -p no:warnings # Displays relevant information and
# supports debugging
# - pytest -s -p no:warnings # Run for record
# Source Code Metadata
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Jon Webb Inc."
__version__ = "1.0"
# ================================================================================
# ================================================================================
# Insert Code here
plat = platform.system()
lin_plat = ['Darwin', 'Linux']
def test_scatter_plot_parse_columns():
"""
    This function tests the ability of scatter_plot_parse_column
    within the MatPlotDataFrame class to process a plot without
    failing
"""
length = 20
x = np.linspace(0, 20, num=20)
linear = x
squared = x ** 2.0
lin = np.repeat('linear', 20)
sq = np.repeat('squared', 20)
# Combine arrays into one
x = np.hstack((x, x))
y = np.hstack((linear, squared))
power = | np.hstack((lin, sq)) | numpy.hstack |
import math
import warnings
from copy import copy, deepcopy
from datetime import datetime
from typing import Mapping, MutableMapping, MutableSequence, Optional
import numpy as np # type: ignore
import pytest # type: ignore
from rads.rpn import (
ABS,
ACOS,
ACOSD,
ACOSH,
ADD,
AND,
ASIN,
ASIND,
ASINH,
ATAN,
ATAN2,
ATAND,
ATANH,
AVG,
BOXCAR,
BTEST,
CEIL,
CEILING,
COS,
COSD,
COSH,
D2R,
DIF,
DIV,
DUP,
DXDY,
EQ,
EXCH,
EXP,
FLOOR,
FMOD,
GAUSS,
GE,
GT,
HYPOT,
IAND,
INRANGE,
INV,
IOR,
ISAN,
ISNAN,
LE,
LOG,
LOG10,
LT,
MAX,
MIN,
MUL,
NAN,
NE,
NEG,
NINT,
OR,
PI,
POP,
POW,
R2,
R2D,
RINT,
SIN,
SIND,
SINH,
SQR,
SQRT,
SUB,
SUM,
TAN,
TAND,
TANH,
YMDHMS,
CompleteExpression,
E,
Expression,
Literal,
StackUnderflowError,
Token,
Variable,
token,
)
from rads.typing import FloatOrArray
GOLDEN_RATIO = math.log((1 + math.sqrt(5)) / 2)
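# Despite its name, GOLDEN_RATIO is ln(phi), the natural log of the golden
# ratio. It is a convenient exact test point for the hyperbolic functions:
# sinh(ln phi) = 1/2, cosh(ln phi) = sqrt(5)/2, tanh(ln phi) = sqrt(5)/5.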
class TestLiteral:
def test_init(self):
Literal(3)
Literal(3.14)
with pytest.raises(TypeError):
Literal("not a number") # type: ignore
def test_pops(self):
assert Literal(3).pops == 0
def test_puts(self):
assert Literal(3).puts == 1
def test_value(self):
assert Literal(3).value == 3
assert Literal(3.14).value == 3.14
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment: MutableMapping[str, FloatOrArray] = {}
assert Literal(3.14)(stack, environment) is None
assert Literal(2.71)(stack, environment) is None
assert stack == [3.14, 2.71]
assert environment == {}
def test_eq(self):
assert Literal(3.14) == Literal(3.14)
assert not Literal(3.14) == Literal(2.71)
assert not Literal(3.14) == 3.14
def test_ne(self):
assert Literal(3.14) != Literal(2.71)
assert not Literal(3.14) != Literal(3.14)
assert Literal(3.14) != 3.14
def test_lt(self):
assert Literal(2.71) < Literal(3.14)
assert not Literal(3.14) < Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) < 3.14
with pytest.raises(TypeError):
2.71 < Literal(3.14)
def test_le(self):
assert Literal(2.71) <= Literal(3.14)
assert Literal(3.14) <= Literal(3.14)
assert not Literal(3.14) <= Literal(2.71)
with pytest.raises(TypeError):
Literal(2.71) <= 3.14
with pytest.raises(TypeError):
2.71 <= Literal(3.14)
def test_gt(self):
assert Literal(3.14) > Literal(2.71)
assert not Literal(2.71) > Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) > 2.71
with pytest.raises(TypeError):
3.14 > Literal(2.71)
def test_ge(self):
assert Literal(3.14) >= Literal(2.71)
assert Literal(3.14) >= Literal(3.14)
assert not Literal(2.71) >= Literal(3.14)
with pytest.raises(TypeError):
Literal(3.14) >= 2.71
with pytest.raises(TypeError):
3.14 >= Literal(2.71)
def test_repr(self):
assert repr(Literal(3)) == "Literal(3)"
assert repr(Literal(3.14)) == "Literal(3.14)"
def test_str(self):
assert str(Literal(3)) == "3"
assert str(Literal(3.14)) == "3.14"
def test_pi(self):
assert PI.value == pytest.approx(np.pi)
def test_e(self):
assert E.value == pytest.approx(np.e)
class TestVariable:
def test_init(self):
Variable("alt")
with pytest.raises(ValueError):
Variable("3")
with pytest.raises(ValueError):
Variable("3name")
with pytest.raises(TypeError):
Variable(3) # type: ignore
with pytest.raises(TypeError):
Variable(3.14) # type: ignore
def test_pops(self):
assert Variable("alt").pops == 0
def test_puts(self):
assert Variable("alt").puts == 1
def test_name(self):
assert Variable("alt").name == "alt"
def test_call(self):
stack: MutableSequence[FloatOrArray] = []
environment = {"alt": np.array([1, 2, 3]), "dry_tropo": 4, "wet_tropo": 5}
assert Variable("wet_tropo")(stack, environment) is None
assert Variable("alt")(stack, environment) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
assert len(environment) == 3
assert "alt" in environment
assert "dry_tropo" in environment
assert "wet_tropo" in environment
assert np.all(environment["alt"] == np.array([1, 2, 3]))
assert environment["dry_tropo"] == 4
assert environment["wet_tropo"] == 5
with pytest.raises(KeyError):
assert Variable("alt")(stack, {}) is None
assert len(stack) == 2
assert stack[0] == 5
assert np.all(stack[1] == np.array([1, 2, 3]))
def test_eq(self):
assert Variable("alt") == Variable("alt")
assert not Variable("alt") == Variable("dry_tropo")
assert not Variable("alt") == "alt"
def test_ne(self):
assert Variable("alt") != Variable("dry_tropo")
assert not Variable("alt") != Variable("alt")
assert Variable("alt") != "alt"
def test_repr(self):
assert repr(Variable("alt")) == "Variable('alt')"
def test_str(self):
assert str(Variable("alt")) == "alt"
def contains_array(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
if isinstance(item, np.ndarray):
return True
return False
def contains_nan(stack: MutableSequence[FloatOrArray]) -> bool:
for item in stack:
try:
if math.isnan(item):
return True
except TypeError:
pass
return False
def assert_token(
operator: Token,
pre_stack: MutableSequence[FloatOrArray],
post_stack: MutableSequence[FloatOrArray],
environment: Optional[Mapping[str, FloatOrArray]] = None,
*,
approx: bool = False,
rtol: float = 1e-15,
atol: float = 1e-16,
) -> None:
"""Assert that a token modifies the stack properly.
Parameters
----------
operator
Operator to test.
pre_stack
Stack state before calling the operator.
post_stack
Desired stack state after calling the operator.
environment
Optional dictionary like object providing the environment for
variable lookup.
approx
Set to true to use approximate equality instead of exact.
rtol
Relative tolerance. Only used if :paramref:`approx` is True.
atol
Absolute tolerance. Only used if :paramref:`approx` is True.
Raises
------
AssertionError
If the operator does not produce the proper post stack state or the
environment parameter is changed.
"""
if not environment:
environment = {"dont_touch": 5}
original_environment = deepcopy(environment)
stack = pre_stack
operator(stack, environment)
# environment should be unchanged
assert environment == original_environment
# check stack
if approx or contains_nan(post_stack) or contains_array(post_stack):
assert len(stack) == len(post_stack)
for a, b in zip(stack, post_stack):
if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
if approx:
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, equal_nan=True
)
else:
np.testing.assert_equal(a, b)
else:
if math.isnan(b):
assert math.isnan(a)
elif approx:
assert a == pytest.approx(b, rel=rtol, abs=atol)
else:
assert a == b
else:
assert stack == post_stack
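# Example usage (the pattern followed by every operator test below): verify
# that ADD pops two values and pushes their sum while deeper stack elements
# are left untouched:
#
#     assert_token(ADD, [1, 2, 3], [1, 5])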
class TestSUBOperator:
def test_repr(self):
assert repr(SUB) == "SUB"
def test_pops(self):
assert SUB.pops == 2
def test_puts(self):
assert SUB.puts == 1
def test_no_copy(self):
assert copy(SUB) is SUB
assert deepcopy(SUB) is SUB
def test_call(self):
assert_token(SUB, [2, 4], [-2])
assert_token(SUB, [2, np.array([4, 1])], [np.array([-2, 1])])
assert_token(SUB, [np.array([4, 1]), 2], [np.array([2, -1])])
assert_token(SUB, [np.array([4, 1]), np.array([1, 4])], [np.array([3, -3])])
# extra stack elements
assert_token(SUB, [0, 2, 4], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUB([], {})
with pytest.raises(StackUnderflowError):
SUB([1], {})
class TestADDOperator:
def test_repr(self):
assert repr(ADD) == "ADD"
def test_pops(self):
assert ADD.pops == 2
def test_puts(self):
assert ADD.puts == 1
def test_no_copy(self):
assert copy(ADD) is ADD
assert deepcopy(ADD) is ADD
def test_call(self):
assert_token(ADD, [2, 4], [6])
assert_token(ADD, [2, np.array([4, 1])], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), 2], [np.array([6, 3])])
assert_token(ADD, [np.array([4, 1]), np.array([1, 4])], [np.array([5, 5])])
# extra stack elements
assert_token(ADD, [0, 2, 4], [0, 6])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ADD([], {})
with pytest.raises(StackUnderflowError):
ADD([1], {})
class TestMULOperator:
def test_repr(self):
assert repr(MUL) == "MUL"
def test_pops(self):
assert MUL.pops == 2
def test_puts(self):
assert MUL.puts == 1
def test_no_copy(self):
assert copy(MUL) is MUL
assert deepcopy(MUL) is MUL
def test_call(self):
assert_token(MUL, [2, 4], [8])
assert_token(MUL, [2, np.array([4, 1])], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), 2], [np.array([8, 2])])
assert_token(MUL, [np.array([4, 1]), np.array([1, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(MUL, [0, 2, 4], [0, 8])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MUL([], {})
with pytest.raises(StackUnderflowError):
MUL([1], {})
class TestPOPOperator:
def test_repr(self):
assert repr(POP) == "POP"
def test_pops(self):
assert POP.pops == 1
def test_puts(self):
assert POP.puts == 0
def test_no_copy(self):
assert copy(POP) is POP
assert deepcopy(POP) is POP
def test_call(self):
assert_token(POP, [1], [])
assert_token(POP, [1, 2], [1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POP([], {})
class TestNEGOperator:
def test_repr(self):
assert repr(NEG) == "NEG"
def test_pops(self):
assert NEG.pops == 1
def test_puts(self):
assert NEG.puts == 1
def test_no_copy(self):
assert copy(NEG) is NEG
assert deepcopy(NEG) is NEG
def test_call(self):
assert_token(NEG, [2], [-2])
assert_token(NEG, [-2], [2])
assert_token(NEG, [np.array([4, -1])], [np.array([-4, 1])])
assert_token(NEG, [np.array([-4, 1])], [np.array([4, -1])])
# extra stack elements
assert_token(NEG, [0, 2], [0, -2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NEG([], {})
class TestABSOperator:
def test_repr(self):
assert repr(ABS) == "ABS"
def test_pops(self):
assert ABS.pops == 1
def test_puts(self):
assert ABS.puts == 1
def test_no_copy(self):
assert copy(ABS) is ABS
assert deepcopy(ABS) is ABS
def test_call(self):
assert_token(ABS, [2], [2])
assert_token(ABS, [-2], [2])
assert_token(ABS, [np.array([4, -1])], [np.array([4, 1])])
assert_token(ABS, [np.array([-4, 1])], [np.array([4, 1])])
# extra stack elements
assert_token(ABS, [0, -2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ABS([], {})
class TestINVOperator:
def test_repr(self):
assert repr(INV) == "INV"
def test_pops(self):
assert INV.pops == 1
def test_puts(self):
assert INV.puts == 1
def test_no_copy(self):
assert copy(INV) is INV
assert deepcopy(INV) is INV
def test_call(self):
assert_token(INV, [2], [0.5])
assert_token(INV, [-2], [-0.5])
assert_token(INV, [np.array([4, -1])], [np.array([0.25, -1])])
assert_token(INV, [np.array([-4, 1])], [np.array([-0.25, 1])])
# extra stack elements
assert_token(INV, [0, 2], [0, 0.5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
INV([], {})
class TestSQRTOperator:
def test_repr(self):
assert repr(SQRT) == "SQRT"
def test_pops(self):
assert SQRT.pops == 1
def test_puts(self):
assert SQRT.puts == 1
def test_no_copy(self):
assert copy(SQRT) is SQRT
assert deepcopy(SQRT) is SQRT
def test_call(self):
assert_token(SQRT, [4], [2])
assert_token(SQRT, [np.array([4, 16])], [np.array([2, 4])])
# extra stack elements
assert_token(SQRT, [0, 4], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQRT([], {})
class TestSQROperator:
def test_repr(self):
assert repr(SQR) == "SQR"
def test_pops(self):
assert SQR.pops == 1
def test_puts(self):
assert SQR.puts == 1
def test_no_copy(self):
        assert copy(SQR) is SQR
        assert deepcopy(SQR) is SQR
def test_call(self):
assert_token(SQR, [2], [4])
assert_token(SQR, [-2], [4])
assert_token(SQR, [np.array([4, -1])], [np.array([16, 1])])
assert_token(SQR, [np.array([-4, 1])], [np.array([16, 1])])
# extra stack elements
assert_token(SQR, [0, -2], [0, 4])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SQR([], {})
class TestEXPOperator:
def test_repr(self):
assert repr(EXP) == "EXP"
def test_pops(self):
assert EXP.pops == 1
def test_puts(self):
assert EXP.puts == 1
def test_no_copy(self):
assert copy(EXP) is EXP
assert deepcopy(EXP) is EXP
def test_call(self):
assert_token(EXP, [math.log(1)], [1.0], approx=True)
assert_token(EXP, [math.log(2)], [2.0], approx=True)
assert_token(
EXP, [np.array([np.log(4), np.log(1)])], [np.array([4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(EXP, [0, np.log(1)], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
EXP([], {})
class TestLOGOperator:
def test_repr(self):
assert repr(LOG) == "LOG"
def test_pops(self):
assert LOG.pops == 1
def test_puts(self):
assert LOG.puts == 1
def test_no_copy(self):
assert copy(LOG) is LOG
assert deepcopy(LOG) is LOG
def test_call(self):
assert_token(LOG, [math.e], [1.0], approx=True)
assert_token(LOG, [math.e ** 2], [2.0], approx=True)
assert_token(LOG, [math.e ** -2], [-2.0], approx=True)
assert_token(
LOG,
[np.array([np.e ** 4, np.e ** -1])],
[np.array([4.0, -1.0])],
approx=True,
)
assert_token(
LOG,
[np.array([np.e ** -4, np.e ** 1])],
[np.array([-4.0, 1.0])],
approx=True,
)
# extra stack elements
assert_token(LOG, [0, np.e], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG([], {})
class TestLOG10Operator:
def test_repr(self):
assert repr(LOG10) == "LOG10"
def test_pops(self):
assert LOG10.pops == 1
def test_puts(self):
assert LOG10.puts == 1
def test_no_copy(self):
assert copy(LOG10) is LOG10
assert deepcopy(LOG10) is LOG10
def test_call(self):
assert_token(LOG10, [10], [1.0], approx=True)
assert_token(LOG10, [10 ** 2], [2.0], approx=True)
assert_token(LOG10, [10 ** -2], [-2.0], approx=True)
assert_token(
LOG10, [np.array([10 ** 4, 10 ** -1])], [np.array([4.0, -1.0])], approx=True
)
assert_token(
LOG10, [np.array([10 ** -4, 10 ** 1])], [np.array([-4.0, 1.0])], approx=True
)
# extra stack elements
assert_token(LOG10, [0, 10], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
LOG10([], {})
class TestSINOperator:
def test_repr(self):
assert repr(SIN) == "SIN"
def test_pops(self):
assert SIN.pops == 1
def test_puts(self):
assert SIN.puts == 1
def test_no_copy(self):
assert copy(SIN) is SIN
assert deepcopy(SIN) is SIN
def test_call(self):
assert_token(SIN, [0.0], [0.0], approx=True)
assert_token(SIN, [math.pi / 6], [1 / 2], approx=True)
assert_token(SIN, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(SIN, [math.pi / 3], [math.sqrt(3) / 2], approx=True)
assert_token(SIN, [math.pi / 2], [1.0], approx=True)
assert_token(
SIN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIN, [0, math.pi / 2], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIN([], {})
class TestCOSOperator:
def test_repr(self):
assert repr(COS) == "COS"
def test_pops(self):
assert COS.pops == 1
def test_puts(self):
assert COS.puts == 1
def test_no_copy(self):
assert copy(COS) is COS
assert deepcopy(COS) is COS
def test_call(self):
assert_token(COS, [0.0], [1.0], approx=True)
assert_token(COS, [math.pi / 6], [math.sqrt(3) / 2], approx=True)
assert_token(COS, [math.pi / 4], [1 / math.sqrt(2)], approx=True)
assert_token(COS, [math.pi / 3], [1 / 2], approx=True)
assert_token(COS, [math.pi / 2], [0.0], approx=True)
assert_token(
COS,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COS,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COS, [0, math.pi / 2], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COS([], {})
class TestTANOperator:
def test_repr(self):
assert repr(TAN) == "TAN"
def test_pops(self):
assert TAN.pops == 1
def test_puts(self):
assert TAN.puts == 1
def test_no_copy(self):
assert copy(TAN) is TAN
assert deepcopy(TAN) is TAN
def test_call(self):
assert_token(TAN, [0.0], [0.0], approx=True)
assert_token(TAN, [math.pi / 6], [1 / math.sqrt(3)], approx=True)
assert_token(TAN, [math.pi / 4], [1.0], approx=True)
assert_token(TAN, [math.pi / 3], [math.sqrt(3)], approx=True)
assert_token(
TAN,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAN,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAN, [0, math.pi / 4], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAN([], {})
class TestSINDOperator:
def test_repr(self):
assert repr(SIND) == "SIND"
def test_pops(self):
assert SIND.pops == 1
def test_puts(self):
assert SIND.puts == 1
def test_no_copy(self):
        assert copy(SIND) is SIND
        assert deepcopy(SIND) is SIND
def test_call(self):
assert_token(SIND, [0], [0.0], approx=True)
assert_token(SIND, [30], [1 / 2], approx=True)
assert_token(SIND, [45], [1 / math.sqrt(2)], approx=True)
assert_token(SIND, [60], [math.sqrt(3) / 2], approx=True)
assert_token(SIND, [90], [1.0], approx=True)
assert_token(
SIND,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
assert_token(
SIND,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
approx=True,
)
# extra stack elements
assert_token(SIND, [0, 90], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SIND([], {})
class TestCOSDOperator:
def test_repr(self):
assert repr(COSD) == "COSD"
def test_pops(self):
assert COSD.pops == 1
def test_puts(self):
assert COSD.puts == 1
def test_no_copy(self):
assert copy(COSD) is COSD
assert deepcopy(COSD) is COSD
def test_call(self):
assert_token(COSD, [0], [1.0], approx=True)
assert_token(COSD, [30], [math.sqrt(3) / 2], approx=True)
assert_token(COSD, [45], [1 / math.sqrt(2)], approx=True)
assert_token(COSD, [60], [1 / 2], approx=True)
assert_token(COSD, [90], [0.0], approx=True)
assert_token(
COSD,
[np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
assert_token(
COSD,
[-np.array([0, 30, 45, 60, 90])],
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
approx=True,
)
# extra stack elements
assert_token(COSD, [0, 90], [0, 0.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSD([], {})
class TestTANDOperator:
def test_repr(self):
assert repr(TAND) == "TAND"
def test_pops(self):
assert TAND.pops == 1
def test_puts(self):
assert TAND.puts == 1
def test_no_copy(self):
assert copy(TAND) is TAND
assert deepcopy(TAND) is TAND
def test_call(self):
assert_token(TAND, [0], [0], approx=True)
assert_token(TAND, [30], [1 / math.sqrt(3)], approx=True)
assert_token(TAND, [45], [1.0], approx=True)
assert_token(TAND, [60], [math.sqrt(3)], approx=True)
assert_token(
TAND,
[np.array([0, 30, 45, 60])],
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
assert_token(
TAND,
[-np.array([0, 30, 45, 60])],
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
approx=True,
)
# extra stack elements
assert_token(TAND, [0, 45], [0, 1.0], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TAND([], {})
class TestSINHOperator:
def test_repr(self):
assert repr(SINH) == "SINH"
def test_pops(self):
assert SINH.pops == 1
def test_puts(self):
assert SINH.puts == 1
def test_no_copy(self):
assert copy(SINH) is SINH
assert deepcopy(SINH) is SINH
def test_call(self):
assert_token(SINH, [0.0], [0.0], approx=True)
assert_token(SINH, [GOLDEN_RATIO], [0.5], approx=True)
assert_token(
SINH, [np.array([0.0, GOLDEN_RATIO])], [np.array([0.0, 0.5])], approx=True
)
# extra stack elements
assert_token(SINH, [0, GOLDEN_RATIO], [0, 0.5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
SINH([], {})
class TestCOSHOperator:
def test_repr(self):
assert repr(COSH) == "COSH"
def test_pops(self):
assert COSH.pops == 1
def test_puts(self):
assert COSH.puts == 1
def test_no_copy(self):
assert copy(COSH) is COSH
assert deepcopy(COSH) is COSH
def test_call(self):
assert_token(COSH, [0.0], [1.0], approx=True)
assert_token(COSH, [GOLDEN_RATIO], [math.sqrt(5) / 2], approx=True)
assert_token(
COSH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([1.0, np.sqrt(5) / 2])],
approx=True,
)
# extra stack elements
assert_token(COSH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
COSH([], {})
class TestTANHOperator:
def test_repr(self):
assert repr(TANH) == "TANH"
def test_pops(self):
assert TANH.pops == 1
def test_puts(self):
assert TANH.puts == 1
def test_no_copy(self):
assert copy(TANH) is TANH
assert deepcopy(TANH) is TANH
def test_call(self):
assert_token(TANH, [0.0], [0.0], approx=True)
assert_token(TANH, [GOLDEN_RATIO], [math.sqrt(5) / 5], approx=True)
assert_token(
TANH,
[np.array([0.0, GOLDEN_RATIO])],
[np.array([0.0, np.sqrt(5) / 5])],
approx=True,
)
# extra stack elements
assert_token(TANH, [0, GOLDEN_RATIO], [0, math.sqrt(5) / 5], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
TANH([], {})
class TestASINOperator:
def test_repr(self):
assert repr(ASIN) == "ASIN"
def test_pops(self):
assert ASIN.pops == 1
def test_puts(self):
assert ASIN.puts == 1
def test_no_copy(self):
assert copy(ASIN) is ASIN
assert deepcopy(ASIN) is ASIN
def test_call(self):
assert_token(ASIN, [0.0], [0.0], approx=True)
assert_token(ASIN, [1 / 2], [math.pi / 6], approx=True)
assert_token(ASIN, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ASIN, [math.sqrt(3) / 2], [math.pi / 3], approx=True)
assert_token(ASIN, [1.0], [math.pi / 2], approx=True)
assert_token(
ASIN,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
ASIN,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ASIN, [0, 1.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIN([], {})
class TestACOSOperator:
def test_repr(self):
assert repr(ACOS) == "ACOS"
def test_pops(self):
assert ACOS.pops == 1
def test_puts(self):
assert ACOS.puts == 1
def test_no_copy(self):
assert copy(ACOS) is ACOS
assert deepcopy(ACOS) is ACOS
def test_call(self):
assert_token(ACOS, [1.0], [0.0], approx=True)
assert_token(ACOS, [math.sqrt(3) / 2], [math.pi / 6], approx=True)
assert_token(ACOS, [1 / math.sqrt(2)], [math.pi / 4], approx=True)
assert_token(ACOS, [1 / 2], [math.pi / 3], approx=True)
assert_token(ACOS, [0.0], [math.pi / 2], approx=True)
assert_token(
ACOS,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(ACOS, [0, 0.0], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOS([], {})
class TestATANOperator:
def test_repr(self):
assert repr(ATAN) == "ATAN"
def test_pops(self):
assert ATAN.pops == 1
def test_puts(self):
assert ATAN.puts == 1
def test_no_copy(self):
assert copy(ATAN) is ATAN
assert deepcopy(ATAN) is ATAN
def test_call(self):
assert_token(ATAN, [0.0], [0.0], approx=True)
assert_token(ATAN, [1 / math.sqrt(3)], [math.pi / 6], approx=True)
assert_token(ATAN, [1.0], [math.pi / 4], approx=True)
assert_token(ATAN, [math.sqrt(3)], [math.pi / 3], approx=True)
assert_token(
ATAN,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
assert_token(
ATAN,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3])],
approx=True,
)
# extra stack elements
assert_token(ATAN, [0, 1.0], [0, math.pi / 4], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAN([], {})
class TestASINDOperator:
def test_repr(self):
assert repr(ASIND) == "ASIND"
def test_pops(self):
assert ASIND.pops == 1
def test_puts(self):
assert ASIND.puts == 1
def test_no_copy(self):
assert copy(ASIND) is ASIND
assert deepcopy(ASIND) is ASIND
def test_call(self):
assert_token(ASIND, [0.0], [0], approx=True)
assert_token(ASIND, [1 / 2], [30], approx=True)
assert_token(ASIND, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ASIND, [math.sqrt(3) / 2], [60], approx=True)
assert_token(ASIND, [1.0], [90], approx=True)
assert_token(
ASIND,
[np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
ASIND,
[-np.array([0.0, 1 / 2, 1 / np.sqrt(2), np.sqrt(3) / 2, 1.0])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ASIND, [0, 1.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASIND([], {})
class TestACOSDOperator:
def test_repr(self):
assert repr(ACOSD) == "ACOSD"
def test_pops(self):
assert ACOSD.pops == 1
def test_puts(self):
assert ACOSD.puts == 1
def test_no_copy(self):
assert copy(ACOSD) is ACOSD
assert deepcopy(ACOSD) is ACOSD
def test_call(self):
assert_token(ACOSD, [1.0], [0], approx=True)
assert_token(ACOSD, [math.sqrt(3) / 2], [30], approx=True)
assert_token(ACOSD, [1 / math.sqrt(2)], [45], approx=True)
assert_token(ACOSD, [1 / 2], [60], approx=True)
assert_token(ACOSD, [0.0], [90], approx=True)
assert_token(
ACOSD,
[np.array([1.0, np.sqrt(3) / 2, 1 / np.sqrt(2), 1 / 2, 0.0])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(ACOSD, [0, 0.0], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSD([], {})
class TestATANDOperator:
def test_repr(self):
assert repr(ATAND) == "ATAND"
def test_pops(self):
assert ATAND.pops == 1
def test_puts(self):
assert ATAND.puts == 1
def test_no_copy(self):
assert copy(ATAND) is ATAND
assert deepcopy(ATAND) is ATAND
def test_call(self):
assert_token(ATAND, [0.0], [0], approx=True)
assert_token(ATAND, [1 / math.sqrt(3)], [30], approx=True)
assert_token(ATAND, [1.0], [45], approx=True)
assert_token(ATAND, [math.sqrt(3)], [60], approx=True)
assert_token(
ATAND,
[np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[np.array([0, 30, 45, 60])],
approx=True,
)
assert_token(
ATAND,
[-np.array([0.0, 1 / np.sqrt(3), 1.0, np.sqrt(3)])],
[-np.array([0, 30, 45, 60])],
approx=True,
)
# extra stack elements
assert_token(ATAND, [0, 1.0], [0, 45], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATAND([], {})
class TestASINHOperator:
def test_repr(self):
assert repr(ASINH) == "ASINH"
def test_pops(self):
assert ASINH.pops == 1
def test_puts(self):
assert ASINH.puts == 1
def test_no_copy(self):
assert copy(ASINH) is ASINH
assert deepcopy(ASINH) is ASINH
def test_call(self):
assert_token(ASINH, [0.0], [0.0], approx=True)
assert_token(ASINH, [0.5], [GOLDEN_RATIO], approx=True)
assert_token(
ASINH, [np.array([0.0, 0.5])], [np.array([0.0, GOLDEN_RATIO])], approx=True
)
# extra stack elements
assert_token(ASINH, [0, 0.5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ASINH([], {})
class TestACOSHOperator:
def test_repr(self):
assert repr(ACOSH) == "ACOSH"
def test_pops(self):
assert ACOSH.pops == 1
def test_puts(self):
assert ACOSH.puts == 1
def test_no_copy(self):
assert copy(ACOSH) is ACOSH
assert deepcopy(ACOSH) is ACOSH
def test_call(self):
assert_token(ACOSH, [1.0], [0.0], approx=True)
assert_token(ACOSH, [math.sqrt(5) / 2], [GOLDEN_RATIO], approx=True)
assert_token(
ACOSH,
[np.array([1.0, np.sqrt(5) / 2])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ACOSH, [0, math.sqrt(5) / 2], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ACOSH([], {})
class TestATANHOperator:
def test_repr(self):
assert repr(ATANH) == "ATANH"
def test_pops(self):
assert ATANH.pops == 1
def test_puts(self):
assert ATANH.puts == 1
def test_no_copy(self):
assert copy(ATANH) is ATANH
assert deepcopy(ATANH) is ATANH
def test_call(self):
assert_token(ATANH, [0.0], [0.0], approx=True)
assert_token(ATANH, [math.sqrt(5) / 5], [GOLDEN_RATIO], approx=True)
assert_token(
ATANH,
[np.array([0.0, np.sqrt(5) / 5])],
[np.array([0.0, GOLDEN_RATIO])],
approx=True,
)
# extra stack elements
assert_token(ATANH, [0, math.sqrt(5) / 5], [0, GOLDEN_RATIO], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
ATANH([], {})
class TestISNANOperator:
def test_repr(self):
assert repr(ISNAN) == "ISNAN"
def test_pops(self):
assert ISNAN.pops == 1
def test_puts(self):
assert ISNAN.puts == 1
def test_no_copy(self):
assert copy(ISNAN) is ISNAN
assert deepcopy(ISNAN) is ISNAN
def test_call(self):
assert_token(ISNAN, [2], [False])
assert_token(ISNAN, [float("nan")], [True])
assert_token(ISNAN, [np.array([4, np.nan])], [np.array([False, True])])
assert_token(ISNAN, [np.array([np.nan, 1])], [np.array([True, False])])
# extra stack elements
assert_token(ISNAN, [0, float("nan")], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISNAN([], {})
class TestISANOperator:
def test_repr(self):
assert repr(ISAN) == "ISAN"
def test_pops(self):
assert ISAN.pops == 1
def test_puts(self):
assert ISAN.puts == 1
def test_no_copy(self):
assert copy(ISAN) is ISAN
assert deepcopy(ISAN) is ISAN
def test_call(self):
assert_token(ISAN, [2], [True])
assert_token(ISAN, [float("nan")], [False])
assert_token(ISAN, [np.array([4, np.nan])], [np.array([True, False])])
assert_token(ISAN, [np.array([np.nan, 1])], [np.array([False, True])])
# extra stack elements
assert_token(ISAN, [0, 2], [0, True])
# not enough stack elements
with pytest.raises(StackUnderflowError):
ISAN([], {})
class TestRINTOperator:
def test_repr(self):
assert repr(RINT) == "RINT"
def test_pops(self):
assert RINT.pops == 1
def test_puts(self):
assert RINT.puts == 1
def test_no_copy(self):
assert copy(RINT) is RINT
assert deepcopy(RINT) is RINT
def test_call(self):
assert_token(RINT, [1.6], [2])
assert_token(RINT, [2.4], [2])
assert_token(RINT, [-1.6], [-2])
assert_token(RINT, [-2.4], [-2])
assert_token(RINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(RINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(RINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
RINT([], {})
class TestNINTOperator:
def test_repr(self):
assert repr(NINT) == "NINT"
def test_pops(self):
assert NINT.pops == 1
def test_puts(self):
assert NINT.puts == 1
def test_no_copy(self):
assert copy(NINT) is NINT
assert deepcopy(NINT) is NINT
def test_call(self):
assert_token(NINT, [1.6], [2])
assert_token(NINT, [2.4], [2])
assert_token(NINT, [-1.6], [-2])
assert_token(NINT, [-2.4], [-2])
assert_token(NINT, [np.array([1.6, 2.4])], [np.array([2, 2])])
assert_token(NINT, [np.array([-1.6, -2.4])], [np.array([-2, -2])])
# extra stack elements
assert_token(NINT, [0, 1.6], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
NINT([], {})
class TestCEILOperator:
def test_repr(self):
assert repr(CEIL) == "CEIL"
def test_pops(self):
assert CEIL.pops == 1
def test_puts(self):
assert CEIL.puts == 1
def test_no_copy(self):
assert copy(CEIL) is CEIL
assert deepcopy(CEIL) is CEIL
def test_call(self):
assert_token(CEIL, [1.6], [2])
assert_token(CEIL, [2.4], [3])
assert_token(CEIL, [-1.6], [-1])
assert_token(CEIL, [-2.4], [-2])
assert_token(CEIL, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEIL, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEIL, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEIL([], {})
class TestCEILINGOperator:
def test_repr(self):
assert repr(CEILING) == "CEILING"
def test_pops(self):
assert CEILING.pops == 1
def test_puts(self):
assert CEILING.puts == 1
def test_no_copy(self):
assert copy(CEILING) is CEILING
assert deepcopy(CEILING) is CEILING
def test_call(self):
assert_token(CEILING, [1.6], [2])
assert_token(CEILING, [2.4], [3])
assert_token(CEILING, [-1.6], [-1])
assert_token(CEILING, [-2.4], [-2])
assert_token(CEILING, [np.array([1.6, 2.4])], [np.array([2, 3])])
assert_token(CEILING, [np.array([-1.6, -2.4])], [np.array([-1, -2])])
# extra stack elements
assert_token(CEILING, [0, 1.2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
CEILING([], {})
class TestFLOOROperator:
def test_repr(self):
assert repr(FLOOR) == "FLOOR"
def test_pops(self):
assert FLOOR.pops == 1
def test_puts(self):
assert FLOOR.puts == 1
def test_no_copy(self):
assert copy(FLOOR) is FLOOR
assert deepcopy(FLOOR) is FLOOR
def test_call(self):
assert_token(FLOOR, [1.6], [1])
assert_token(FLOOR, [2.4], [2])
assert_token(FLOOR, [-1.6], [-2])
assert_token(FLOOR, [-2.4], [-3])
assert_token(FLOOR, [np.array([1.6, 2.4])], [np.array([1, 2])])
assert_token(FLOOR, [np.array([-1.6, -2.4])], [np.array([-2, -3])])
# extra stack elements
assert_token(FLOOR, [0, 1.8], [0, 1])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FLOOR([], {})
class TestD2ROperator:
def test_repr(self):
assert repr(D2R) == "D2R"
def test_pops(self):
assert D2R.pops == 1
def test_puts(self):
assert D2R.puts == 1
def test_no_copy(self):
assert copy(D2R) is D2R
assert deepcopy(D2R) is D2R
def test_call(self):
assert_token(D2R, [0], [0.0], approx=True)
assert_token(D2R, [30], [math.pi / 6], approx=True)
assert_token(D2R, [45], [math.pi / 4], approx=True)
assert_token(D2R, [60], [math.pi / 3], approx=True)
assert_token(D2R, [90], [math.pi / 2], approx=True)
assert_token(
D2R,
[np.array([0, 30, 45, 60, 90])],
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
assert_token(
D2R,
[-np.array([0, 30, 45, 60, 90])],
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
approx=True,
)
# extra stack elements
assert_token(D2R, [0, 90], [0, math.pi / 2], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
D2R([], {})
class TestR2DOperator:
def test_repr(self):
assert repr(R2D) == "R2D"
def test_pops(self):
assert R2D.pops == 1
def test_puts(self):
assert R2D.puts == 1
def test_no_copy(self):
assert copy(R2D) is R2D
assert deepcopy(R2D) is R2D
def test_call(self):
assert_token(R2D, [0.0], [0], approx=True)
assert_token(R2D, [math.pi / 6], [30], approx=True)
assert_token(R2D, [math.pi / 4], [45], approx=True)
assert_token(R2D, [math.pi / 3], [60], approx=True)
assert_token(R2D, [math.pi / 2], [90], approx=True)
assert_token(
R2D,
[np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[np.array([0, 30, 45, 60, 90])],
approx=True,
)
assert_token(
R2D,
[-np.array([0.0, np.pi / 6, np.pi / 4, np.pi / 3, np.pi / 2])],
[-np.array([0, 30, 45, 60, 90])],
approx=True,
)
# extra stack elements
assert_token(R2D, [0, math.pi / 2], [0, 90], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
R2D([], {})
class TestYMDHMSOperator:
def test_repr(self):
assert repr(YMDHMS) == "YMDHMS"
def test_pops(self):
assert YMDHMS.pops == 1
def test_puts(self):
assert YMDHMS.puts == 1
def test_no_copy(self):
assert copy(YMDHMS) is YMDHMS
assert deepcopy(YMDHMS) is YMDHMS
def test_call(self):
epoch = datetime(1985, 1, 1, 0, 0, 0, 0)
date1 = datetime(2008, 7, 4, 12, 19, 19, 570865)
date2 = datetime(2019, 6, 26, 12, 31, 6, 930575)
seconds1 = (date1 - epoch).total_seconds()
seconds2 = (date2 - epoch).total_seconds()
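        # YMDHMS packs a timestamp as YYMMDDHHMMSS.ffffff with a two-digit
        # year, e.g. 2008-07-04 12:19:19.570865 -> 80704121919.570865.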
assert_token(YMDHMS, [seconds1], [80704121919.570865], approx=True)
assert_token(YMDHMS, [seconds2], [190626123106.930575], approx=True)
assert_token(
YMDHMS,
[np.array([seconds1, seconds2])],
[np.array([80704121919.570865, 190626123106.930575])],
approx=True,
)
# extra stack elements
assert_token(YMDHMS, [0, seconds1], [0, 80704121919.570865], approx=True)
# not enough stack elements
with pytest.raises(StackUnderflowError):
YMDHMS([], {})
class TestSUMOperator:
def test_repr(self):
assert repr(SUM) == "SUM"
def test_pops(self):
assert SUM.pops == 1
def test_puts(self):
assert SUM.puts == 1
def test_no_copy(self):
assert copy(SUM) is SUM
assert deepcopy(SUM) is SUM
def test_call(self):
assert_token(SUM, [2], [2])
assert_token(SUM, [-2], [-2])
assert_token(SUM, [float("nan")], [0])
assert_token(SUM, [np.array([4, -1])], [3])
assert_token(SUM, [np.array([-4, 1])], [-3])
assert_token(SUM, [np.array([1, np.nan, 3])], [4])
assert_token(SUM, [np.array([np.nan])], [0])
# extra stack elements
assert_token(SUM, [0, 2], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
SUM([], {})
class TestDIFOperator:
def test_repr(self):
assert repr(DIF) == "DIF"
def test_pops(self):
assert DIF.pops == 1
def test_puts(self):
assert DIF.puts == 1
def test_no_copy(self):
assert copy(DIF) is DIF
assert deepcopy(DIF) is DIF
def test_call(self):
assert_token(DIF, [2], [np.array([np.nan])])
assert_token(DIF, [np.array([1, 2])], [np.array([np.nan, 1])])
assert_token(DIF, [np.array([1, 2, 5])], [np.array([np.nan, 1, 3])])
assert_token(
DIF, [np.array([1, np.nan, 5])], [np.array([np.nan, np.nan, np.nan])]
)
# extra stack elements
assert_token(DIF, [0, 2], [0, np.array([np.nan])])
with pytest.raises(StackUnderflowError):
DIF([], {})
class TestDUPOperator:
def test_repr(self):
assert repr(DUP) == "DUP"
def test_pops(self):
assert DUP.pops == 1
def test_puts(self):
assert DUP.puts == 2
def test_no_copy(self):
assert copy(DUP) is DUP
assert deepcopy(DUP) is DUP
def test_call(self):
assert_token(DUP, [2], [2, 2])
assert_token(DUP, [np.array([4, -1])], [np.array([4, -1]), np.array([4, -1])])
# extra stack elements
assert_token(DUP, [0, 2], [0, 2, 2])
with pytest.raises(StackUnderflowError):
DUP([], {})
class TestDIVOperator:
def test_repr(self):
assert repr(DIV) == "DIV"
def test_pops(self):
assert DIV.pops == 2
def test_puts(self):
assert DIV.puts == 1
def test_no_copy(self):
assert copy(DIV) is DIV
assert deepcopy(DIV) is DIV
def test_call(self):
assert_token(DIV, [10, 2], [5])
assert_token(DIV, [10, np.array([2, 5])], [np.array([5, 2])])
assert_token(DIV, [np.array([10, 4]), 2], [np.array([5, 2])])
assert_token(DIV, [np.array([8, 16]), np.array([2, 4])], [np.array([4, 4])])
# extra stack elements
assert_token(DIV, [0, 10, 2], [0, 5])
# not enough stack elements
with pytest.raises(StackUnderflowError):
DIV([], {})
with pytest.raises(StackUnderflowError):
DIV([1], {})
class TestPOWOperator:
def test_repr(self):
assert repr(POW) == "POW"
def test_pops(self):
assert POW.pops == 2
def test_puts(self):
assert POW.puts == 1
def test_no_copy(self):
assert copy(POW) is POW
assert deepcopy(POW) is POW
def test_call(self):
assert_token(POW, [1, 2], [1])
assert_token(POW, [2, 2], [4])
assert_token(POW, [2, 4], [16])
assert_token(POW, [2, np.array([1, 2, 3])], [np.array([2, 4, 8])])
assert_token(POW, [np.array([1, 2, 3]), 2], [np.array([1, 4, 9])])
assert_token(POW, [np.array([2, 3]), np.array([5, 6])], [np.array([32, 729])])
# extra stack elements
assert_token(POW, [0, 2, 4], [0, 16])
# not enough stack elements
with pytest.raises(StackUnderflowError):
POW([], {})
with pytest.raises(StackUnderflowError):
POW([1], {})
class TestFMODOperator:
    def test_repr(self):
        assert repr(FMOD) == "FMOD"
    def test_pops(self):
        assert FMOD.pops == 2
    def test_puts(self):
        assert FMOD.puts == 1
def test_no_copy(self):
assert copy(FMOD) is FMOD
assert deepcopy(FMOD) is FMOD
def test_call(self):
assert_token(FMOD, [1, 2], [1])
assert_token(FMOD, [2, 10], [2])
assert_token(FMOD, [12, 10], [2])
assert_token(FMOD, [13, np.array([10, 100])], [np.array([3, 13])])
assert_token(FMOD, [np.array([7, 15]), 10], [np.array([7, 5])])
assert_token(FMOD, [np.array([7, 15]), np.array([10, 5])], [np.array([7, 0])])
# extra stack elements
assert_token(FMOD, [0, 12, 10], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
FMOD([], {})
with pytest.raises(StackUnderflowError):
FMOD([1], {})
class TestMINOperator:
def test_repr(self):
assert repr(MIN) == "MIN"
def test_pops(self):
assert MIN.pops == 2
def test_puts(self):
assert MIN.puts == 1
def test_no_copy(self):
assert copy(MIN) is MIN
assert deepcopy(MIN) is MIN
def test_call(self):
assert_token(MIN, [2, 3], [2])
assert_token(MIN, [3, 2], [2])
assert_token(MIN, [2, np.array([1, 3])], [np.array([1, 2])])
assert_token(MIN, [np.array([1, 3]), 2], [np.array([1, 2])])
assert_token(MIN, [np.array([2, 3]), np.array([3, 2])], [np.array([2, 2])])
        # extra stack elements
assert_token(MIN, [0, 2, 3], [0, 2])
# not enough stack elements
with pytest.raises(StackUnderflowError):
MIN([], {})
with pytest.raises(StackUnderflowError):
MIN([1], {})
class TestMAXOperator:
def test_repr(self):
assert repr(MAX) == "MAX"
def test_pops(self):
assert MAX.pops == 2
def test_puts(self):
assert MAX.puts == 1
def test_no_copy(self):
assert copy(MAX) is MAX
assert deepcopy(MAX) is MAX
def test_call(self):
assert_token(MAX, [2, 3], [3])
assert_token(MAX, [3, 2], [3])
assert_token(MAX, [2, np.array([1, 3])], [np.array([2, 3])])
assert_token(MAX, [np.array([1, 3]), 2], [np.array([2, 3])])
        assert_token(MAX, [np.array([2, 3]), np.array([3, 2])], [np.array([3, 3])])
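        # Assumed continuation, mirroring TestMINOperator above:
        # extra stack elements
        assert_token(MAX, [0, 2, 3], [0, 3])
        # not enough stack elements
        with pytest.raises(StackUnderflowError):
            MAX([], {})
        with pytest.raises(StackUnderflowError):
            MAX([1], {})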
# -*- coding: utf-8 -*-
"""
:py:class:`GenerateLabelFieldReader`
"""
import numpy as np
from senta.common.register import RegisterSet
from senta.common.rule import DataShape, FieldLength, InstanceName
from senta.data.field_reader.base_field_reader import BaseFieldReader
from senta.data.util_helper import generate_pad_batch_data
from senta.modules.token_embedding.custom_fluid_embedding import CustomFluidTokenEmbedding
@RegisterSet.field_reader.register
class GenerateLabelFieldReader(BaseFieldReader):
"""seq2seq label的专用field_reader
"""
def __init__(self, field_config):
"""
:param field_config:
"""
BaseFieldReader.__init__(self, field_config=field_config)
self.paddle_version_code = 1.6
if self.field_config.tokenizer_info:
tokenizer_class = RegisterSet.tokenizer.__getitem__(self.field_config.tokenizer_info["type"])
params = None
if self.field_config.tokenizer_info.__contains__("params"):
params = self.field_config.tokenizer_info["params"]
self.tokenizer = tokenizer_class(vocab_file=self.field_config.vocab_path,
split_char=self.field_config.tokenizer_info["split_char"],
unk_token=self.field_config.tokenizer_info["unk_token"],
params=params)
if self.field_config.embedding_info and self.field_config.embedding_info["use_reader_emb"]:
self.token_embedding = CustomFluidTokenEmbedding(emb_dim=self.field_config.embedding_info["emb_dim"],
vocab_size=self.tokenizer.vocabulary.get_vocab_size())
def init_reader(self):
""" 初始化reader格式
:return: reader的shape[]、type[]、level[]
"""
shape = []
types = []
levels = []
"""train_tar_ids"""
if self.field_config.data_type == DataShape.STRING:
"""src_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('int64')
else:
raise TypeError("GenerateLabelFieldReader's data_type must be string")
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
"""infer_tar_ids"""
shape.append([-1, self.field_config.max_seq_len, 1])
levels.append(0)
types.append('int64')
"""mask_ids"""
shape.append([-1, self.field_config.max_seq_len])
levels.append(0)
types.append('float32')
"""seq_lens"""
shape.append([-1])
levels.append(0)
types.append('int64')
return shape, types, levels
def convert_texts_to_ids(self, batch_text):
"""将一个batch的明文text转成id
:param batch_text:
:return:
"""
train_src_ids = []
infer_src_ids = []
for text in batch_text:
if self.field_config.need_convert:
tokens = self.tokenizer.tokenize(text)
src_id = self.tokenizer.convert_tokens_to_ids(tokens)
else:
src_id = text.split(" ")
            # apply the truncation policy
if len(src_id) > self.field_config.max_seq_len - 1:
src_id = src_id[0:self.field_config.max_seq_len - 1]
train_src_id = [self.field_config.label_start_id] + src_id
infer_src_id = src_id + [self.field_config.label_end_id]
train_src_ids.append(train_src_id)
infer_src_ids.append(infer_src_id)
return_list = []
train_label_ids, train_label_mask, label_lens = generate_pad_batch_data(train_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids, infer_label_mask, label_lens = generate_pad_batch_data(infer_src_ids,
pad_idx=self.field_config.padding_id,
return_input_mask=True,
return_seq_lens=True,
paddle_version_code=self.paddle_version_code)
infer_label_ids = | np.reshape(infer_label_ids, (infer_label_ids.shape[0], infer_label_ids.shape[1], 1)) | numpy.reshape |
import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
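# e.g. factors(12) == {1, 2, 3, 4, 6, 12} (presumably used by tests that need
# compatible dimension pairs when reshaping)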
####################################################################################
def test_seed():
np.random.seed(1)
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
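# alen() mirrors the (since-removed) np.alen and returns the length of the first
# axis, i.e. the number of rows.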
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
####################################################################################
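# NumCpp.Axis.ROW corresponds to numpy's axis=0 (reduce down the rows) and
# NumCpp.Axis.COL to axis=1 (reduce across the columns); Axis.NONE reduces everything.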
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
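# One comparison array is perturbed by tolerance / 10 (should pass) and another by 1
# (should fail), probing both sides of the tolerance threshold.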
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
numCols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
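# If the random stop falls below start, the step sign is flipped so the requested
# range is still non-empty, matching np.arange semantics.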
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
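# Because ties can legitimately be ordered differently by the two libraries, the
# sorted values (row[cIdx] vs. row[pIdx]) are compared rather than the raw index arrays.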
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
####################################################################################
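# argwhere() is fed a boolean mask (data > randValue) and its flattened indices are
# checked against np.argwhere applied to the equivalently flattened numpy mask.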
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
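# Each asarray* binding constructs an NdArray from a different C++ source (raw values,
# vectors, deques, lists, iterator pairs, raw pointers), as the suffixes suggest, with
# and without copying or taking ownership of the underlying memory.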
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
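# Casting complex to double drops the imaginary part; NumPy emits a ComplexWarning
# for the reference cast below, so it is suppressed around the comparison.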
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
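# For the axis-reducing cases the weight vector must match the length of the reduced
# axis: shape.rows weights for Axis.ROW (numpy axis=0) and shape.cols for Axis.COL (axis=1).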
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
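# np.binary_repr is asked for the full bit width of uint64 so its leading zeros match
# NumCpp's fixed-width representation.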
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
####################################################################################
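# minLength = max(data) + 10 forces the result to be zero-padded past the largest
# observed value, exercising the minlength parameter; the weighted variant is checked too.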
def test_bincount():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
####################################################################################
def test_byteswap():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
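    # Axis.ROW reduces along rows, yielding one center of mass per column; the loop below builds the same per-column scipy reference.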
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
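    # Axis.NONE joins the flattened inputs, so the numpy reference concatenates the flattened arrays.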
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data.T:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
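    # For 2-element vectors the cross product degenerates to the scalar z-component, hence the .item() comparison.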
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
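    # NumCpp.Slice(0, 100, 4) and Python's slice(0, 99, 4) select the same indices 0, 4, ..., 96,
    # so the comparisons below line up despite the differing stop values.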
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
####################################################################################
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
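    # Casting the result back to uint32 (presumably) reproduces numpy's unsigned wraparound when a difference is negative.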
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
####################################################################################
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
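    # Redraw until the scalar divisor is nonzero so the reference division below never divides by zero.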
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
    cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
####################################################################################
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
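    # The arrays are stored as 1 x N row vectors, so the numpy reference needs data2.T to form the same inner product.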
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
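    # empty* leaves the contents uninitialized, so only the shape and size are checked.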
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
####################################################################################
def test_expm1():
    value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
    tempDir = tempfile.gettempdir()  # portable temp dir instead of a hardcoded C:\Temp
    tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
bitsToshift = np.random.randint(1, 32, [1, ]).item()
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.left_shift(cArray, bitsToshift).getNumpyArray(),
np.left_shift(data, bitsToshift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
    data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
    data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
    cArray1.setArray(data1)
    cArray2.setArray(data2)
    # assumed completion of the truncated test, mirroring the other binary-op tests
    assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
import numpy as np
class Analytic_Solver():
def __init__(self):
pass
    def wrap_angles(self, radians):
        """Wraps a radian angle to [-pi, pi), e.g. 3.5 -> -2.7832 and -3.5 -> 2.7832."""
        return (radians + np.pi) % (2 * np.pi) - np.pi
def get_axis(self, pose):
origin = pose[:3]
x = origin + self.R(np.array([3,0,0]), ref=pose[3:])
y = origin + self.R(np.array([0,3,0]), ref=pose[3:])
z = origin + self.R(np.array([0,0,3]), ref=pose[3:])
return origin, x, y, z
    def T(self, displacement, ref):
        """Translates a 3D vector or 6D pose (displacement) by the 3D offset ref.
        displacement can be a 3D vector or 6D pose; ref must be [x, y, z]."""
assert len(ref) == 3, 'Invalid Reference: format as (x, y, z)'
if len(displacement) > 3:
return np.concatenate([displacement[:3] + ref, displacement[3:]])
return np.array(displacement[:3] + ref)
    def Rx(self, effector, phi):
        """Rotates a 3D vector or 6D pose (effector) about the x axis by phi radians."""
        R = np.matrix([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])
        if len(effector) > 3:
            # The original referenced an undefined `ref` here; leaving the orientation
            # component unchanged is an assumption.
            return np.concatenate([np.array(np.matmul(R, np.array([effector[:3]]).T)).reshape(3), effector[3:]])
        return np.array(np.matmul(R, effector.T)).reshape(3)
import numpy as np
import cv2 as cv
# Create a black image
#img = np.ones((512,512,3), np.uint8)*128 # gray background
from sage.all import *
import numpy as np
import pandas,sys
import statsmodels.api as sm
#print(img)
# Draw a diagonal blue line with thickness of 5 px
#cv.line(img,(0,0),(511,511),(255,0,0),5)
# drawing a rectangle:
#cv.rectangle(img,(384,0),(510,128),(0,255,0),3)
# drawing a circle:
#cv.circle(img,(447,63), 63, (0,0,255), -1)
import music21 as m21
from itertools import product
pitchToZ12 = dict(zip(["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"],range(12)))
Z12ToPitch = dict(zip(range(12),["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"]))
import numpy as np
def xml_to_list(xml):
xml_data = m21.converter.parse(xml)
score = []
for part in xml_data.parts:
parts = []
print(part)
for note in part.flat.notesAndRests:
if type(note)==m21.note.Rest:
print("rest", note, note.duration.quarterLength)
duration = float(note.duration.quarterLength)
vol = 32 #note.volume.velocity
pitches= tuple([64])
parts.append(tuple([float(note.offset),pitches,duration,vol,1]))
elif type(note)==m21.chord.Chord:
print("chord ",note,note.duration.quarterLength)
pitches = sorted([e.pitch.midi for e in note]) # todo: think about chords
vol = note[0].volume.velocity
if vol is None:
vol = int(note[0].volume.realized * 127)
else:
vol = int(vol)
duration = float(note.duration.quarterLength)
parts.append(tuple([float(note.offset),tuple(pitches),duration,vol,0]))
else:
print("note", note,note.duration.quarterLength)
start = note.offset
duration = float(note.quarterLength)
pitches = tuple([note.pitch.midi])
#print(pitch,duration,note.volume)
vol = note.volume.velocity
if vol is None:
vol = int(note.volume.realized * 127)
parts.append(tuple([float(note.offset),pitches,duration,vol,0]) )
score.append(parts)
print( [ len(part) for part in score])
return score
def parseXml(fp):
return xml_to_list(fp)
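# Example (added note; the file path is hypothetical): score = parseXml('example.musicxml')
# returns one list per part, each entry a tuple
#   (offset_in_quarter_notes, (midi_pitches, ...), duration_in_quarter_notes, velocity, is_rest)
# where is_rest is 1 for rests (rendered above as fixed pitch 64 / velocity 32) and 0 otherwise.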
def draw_circle(image, pp, color, radius=0):
x,y = pp
image = cv.circle(image, (x,y), color=color, thickness=cv.FILLED,radius=radius)
return image
def draw_line(image, start,end, color):
image = cv.line(image, start, end, color, thickness=2)
return image
#img = draw_point(img,(256,256),(255,255,255))
def ff(a=1, b=6, c=-14, x=1, y=1/2, z=1j/3):
    i = 1j  # np.complex was removed from NumPy; use the builtin imaginary literal
    return (lambda t: x*np.exp(a*i*t) + y*np.exp(b*i*t) + z*np.exp(c*i*t))
def FF(nn=[1, 3], m=2, k=1, aa=[1, 2]):
    if all([n % m == k for n in nn]) and len(aa) == len(nn) and np.gcd(k, m) == 1:
        i = 1j
        return (lambda t: sum([aa[j]*np.exp(nn[j]*i*t) for j in range(len(aa))]))
    else:
        return None
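# Usage sketch (added note, not part of the original file). FF builds the curve
# t -> sum_j aa[j] * exp(i * nn[j] * t); when every frequency satisfies n == k (mod m)
# with gcd(k, m) = 1, the closed curve has m-fold rotational symmetry. For example,
# nn = [1, -4, 6] with m = 5, k = 1 gives a 5-fold symmetric curve:
#   img = np.ones((512, 512, 3), np.uint8) * 255
#   curve = FF(nn=[1, -4, 6], m=5, k=1, aa=[1, 0.5, 0.25])
#   img = draw_curve(img, curve, mm=(256, 256), color=(200, 0, 0), rr=100, number_of_points=2000)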
def draw_curve(img, ff, mm, color, rr=120,number_of_points = 100,return_points = False):
points = []
def compute_point(ff,k,start,step,rr,mm):
t = start+k*step
z = ff(t)
x,y = z.real,z.imag
# scale:
x = x*rr
y = y*rr
# translate:
x,y = x+mm[0],y+mm[1]
# round to integers
x,y = int(x),int(y)
return x,y
start = 0.0
end = 2*np.pi*14
N = number_of_points
step = (end-start)/N
for k in range(N-1):
x,y = compute_point(ff,k,start,step,rr,mm)
x2,y2 = compute_point(ff,k+1,start,step,rr,mm)
#print(x,y)
points.append((x,y))
img = draw_line(img, (x,y),(x2,y2), color=color)
points.append((x2,y2))
if return_points: return img,points
return img
def color_img(img):
ret, thresh = cv.threshold(img, 127, 255, 0)
num_labels, labels = cv.connectedComponents(thresh,connectivity=8)
# Map component labels to hue val
label_hue = np.uint8(179*labels/np.max(labels))
blank_ch = 255*np.ones_like(label_hue)
labeled_img = cv.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv.cvtColor(labeled_img, cv.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue==0] = 0
return labeled_img
#img = draw_circle(img,(256,256), 63, (0,0,255),number_of_points = 10000)
def getImgNrs(start_duration,end_duration,bpm,fps):
N_img_start = int(np.round(fps*60*start_duration/(bpm),0))
N_img_end = int(np.round(fps*60*end_duration/bpm,0))
return (N_img_start,N_img_end)
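# Worked example (added note): at bpm=60 and fps=25, one quarter note spans exactly
# 25 frames, so getImgNrs(0, 1, 60, 25) == (0, 25) and getImgNrs(1, 2, 60, 25) == (25, 50).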
def convertScore(scores,bpm=70,fps=25,verbose=False):
#determine max durations:
maxDurs = [0 for k in range(len(scores))]
startsAndDurs = [0 for k in range(len(scores))]
pitchSet = set([])
volumeSet = set([])
partCounter = 0
for part in scores:
for note in part:
start,pitches, duration, volume, rest = note
maxDurs[partCounter] += duration
if startsAndDurs[partCounter] < start+duration:
startsAndDurs[partCounter] = start+duration
partCounter+=1
maxDur = np.max(startsAndDurs)
print(startsAndDurs)
print(maxDur)
print(bpm)
Nimgs = int(np.round(60*fps*maxDur/bpm,0))
print(Nimgs)
imgs2Notes = dict([])
#fill dictionary with notes per image
for part in scores:
dur = 0
for note in part:
start,pitches, duration, volume, rest = note
print(start,pitches,duration,volume,rest)
for pitch in pitches:
pitchSet.add(pitch)
volumeSet.add(volume)
start_img, end_img = getImgNrs(start_duration=start,end_duration = start+duration,bpm=bpm,fps=fps)
if verbose: print(note,start_img,end_img)
for k in range(start_img,end_img+1):
if k in imgs2Notes.keys():
imgs2Notes[k].append((note,start_img,end_img))
else:
imgs2Notes[k] = [(note,start_img,end_img)]
dur += duration
return imgs2Notes,pitchSet,volumeSet
def create_video(imgs,videoname="./opencv_videos/video.avi",fps=25):
fourcc = cv.VideoWriter_fourcc(*"X264")
height,width,x = imgs[0].shape
print(width,height,x,fps,videoname)
framesPerSecond = fps
video = cv.VideoWriter(videoname, fourcc, framesPerSecond, (width, height))
cnt = 0
for img in imgs:
#print(cnt,img.shape)
video.write(img)
cnt += 1
video.release()
return video
def compute_color(pitch,volume,t,N,noteCounter,lN,start_img,end_img):
tScaled = (t-start_img)/(end_img-start_img+1)
return (int(tScaled*pitch*2*np.sin(2*np.pi*t/N)),int(tScaled*volume*2*np.sin(2*np.pi*t/N)),int(tScaled*(noteCounter/lN)*128))
def compute_radius(pitch,volume,t,N,noteCounter,lN,start_img,end_img):
tScaled = (t-start_img)/(end_img-start_img+1)
return max(1,int(tScaled*np.abs((volume)*np.cos(2*np.pi*t/N))))
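# End-to-end usage sketch (added note; 'score.xml' and the output path are hypothetical).
# Parse a score, map notes to frame indices, draw one circle per sounding note, and
# encode the frames to video with the helpers defined above.
def _demo_render(xml_path='score.xml', bpm=70, fps=25, size=512):
    scores = parseXml(xml_path)
    imgs2Notes, pitchSet, volumeSet = convertScore(scores, bpm=bpm, fps=fps)
    N = max(imgs2Notes.keys()) + 1
    imgs = []
    for t in range(N):
        img = np.zeros((size, size, 3), np.uint8)
        for note, start_img, end_img in imgs2Notes.get(t, []):
            start, pitches, duration, volume, rest = note
            for j, pitch in enumerate(sorted(pitches)):
                color = compute_color(pitch, volume, t, N, j, len(pitches), start_img, end_img)
                radius = compute_radius(pitch, volume, t, N, j, len(pitches), start_img, end_img)
                # place higher pitches higher on the canvas, centered around MIDI 64
                img = draw_circle(img, (size // 2, size // 2 - (pitch - 64) * 3), color, radius)
        imgs.append(img)
    return create_video(imgs, videoname='./opencv_videos/demo.avi', fps=fps)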
#!/usr/bin/python
import numpy as np
import cv2 as cv  # required by the drawing helpers below; missing in the original
import random
# Check if a point is inside a rectangle
def rect_contains(rect, point) :
if point[0] < rect[0] :
return False
elif point[1] < rect[1] :
return False
elif point[0] > rect[2] :
return False
elif point[1] > rect[3] :
return False
return True
# Draw a point
def draw_point(img, p, color):
    cv.circle(img, p, 2, color, cv.FILLED, cv.LINE_AA, 0)
# Draw voronoi diagram
def draw_voronoi(img, subdiv,color) :
( facets, centers) = subdiv.getVoronoiFacetList([])
r,g,b = color
lf = len(facets)
for i in range(0,len(facets)) :
ifacet_arr = []
for f in facets[i] :
ifacet_arr.append(f)
        ifacet = np.array(ifacet_arr, np.int32)  # np.int was removed from NumPy; fillConvexPoly wants int32
        color = (255 - i/lf*r, i/lf*g, i/lf*b)
        cv.fillConvexPoly(img, ifacet, color, cv.LINE_AA, 0)
        ifacets = np.array([ifacet])
        # Assumed completion of the truncated original, following the standard OpenCV
        # Voronoi sample: outline the facet and mark its center.
        cv.polylines(img, ifacets, True, (0, 0, 0), 1, cv.LINE_AA, 0)
        cv.circle(img, (int(centers[i][0]), int(centers[i][1])), 3, (0, 0, 0), cv.FILLED, cv.LINE_AA, 0)
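# Usage sketch (added note, not part of the original file): build a Subdiv2D over a
# canvas, insert random sites, and render the Voronoi diagram with draw_voronoi above.
def _demo_voronoi(width=512, height=512, npoints=25):
    img = np.zeros((height, width, 3), np.uint8)
    subdiv = cv.Subdiv2D((0, 0, width, height))
    for _ in range(npoints):
        subdiv.insert((random.randint(1, width - 2), random.randint(1, height - 2)))
    draw_voronoi(img, subdiv, (255, 128, 0))
    return img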
# UCSC Genome Browser
import os
import sys
import numpy as np
import pandas as pd
from Bio import SeqIO
from Bio.Seq import Seq
from tqdm import tqdm
from itertools import repeat
import wget
import ast
import multiprocessing as mp
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.pool import NullPool
_db_url = {
"drivername": 'mysql+pymysql',
"host": "genome-mysql.cse.ucsc.edu",
"port": "3306",
"username": "genome",
"password": "",
"database": 'hg19',
"query": {'charset': 'utf8'}
}
_seq_url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/chromFa.tar.gz"
_chrom_set = ["chr"+str(i) for i in range(1, 23)] + ["chrX", "chrY"]
def fetch_seq(df, df_total, chrom, coord_version, window_size=1000):
print("[INFO] Sequencing fetch ref+alt+haplotype+2strands alleles of {} of length {} ......".format(chrom, window_size))
df['seq_ref_1'] = ''
df['seq_ref_2'] = ''
df['seq_alt_1'] = ''
df['seq_alt_2'] = ''
df['seq_hap_1'] = ''
df['seq_hap_2'] = ''
n_empty = 0
if coord_version == 'hg19':
dna_chr = list(SeqIO.parse("chromFa_hg19/{}.fa".format(chrom), "fasta"))[0].seq
elif coord_version == 'hg38':
dna_chr = list(SeqIO.parse("chromFa_hg38/{}.fa".format(chrom), "fasta"))[0].seq
for ind, row in tqdm(df.iterrows()):
start = row['pos'] - window_size // 2
end = row['pos'] + window_size // 2
nearby = df_total.loc[(df_total['pos'] >= start) & (df_total['pos'] < end)]
if start >= 0 and end <= len(dna_chr):
ref_seq = dna_chr[start: end]
alt_seq = dna_chr[start: row['pos']-1] + row['alt'] + dna_chr[row['pos']: end]
            df.loc[ind, 'seq_ref_1'] = ref_seq
            df.loc[ind, 'seq_ref_2'] = ref_seq.reverse_complement()
            df.loc[ind, 'seq_alt_1'] = alt_seq
            df.loc[ind, 'seq_alt_2'] = alt_seq.reverse_complement()
hap_seq = list(ref_seq)
for i, v in nearby.iterrows():
hap_seq[v['pos']-1-start] = v['alt']
hap_seq = Seq(''.join(hap_seq))
            df.loc[ind, 'seq_hap_1'] = hap_seq
            df.loc[ind, 'seq_hap_2'] = hap_seq.reverse_complement()
else:
n_empty += 1
df = df.dropna(subset=['seq_ref_1', 'seq_ref_2', 'seq_alt_1', 'seq_alt_2', 'seq_hap_1', 'seq_hap_2'])
print('[INFO] n_empty of {} is: {}'.format(chrom, n_empty))
return df
def fast_fetch_seq(df, chrom, coord_version, window_size=1000):
cores = mp.cpu_count()
pool = mp.Pool(cores)
df_list = np.array_split(df, cores)
df_seq = pd.concat(pool.starmap(fetch_seq, zip(df_list, repeat(df[['pos', 'alt']]), repeat(chrom), repeat(coord_version), repeat(window_size))))
pool.close()
pool.join()
return df_seq
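# Example (added note; the DataFrame filter is hypothetical): per-chromosome parallel fetch,
#   df_chr1 = fast_fetch_seq(df[df['chrom'] == 'chr1'], 'chr1', 'hg19', window_size=1000)
# Each worker receives one row-chunk plus the full (pos, alt) table, so nearby variants
# within the window can be folded into the haplotype sequence.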
def fetch_metadata(rsid):
db = create_engine(URL(**_db_url), poolclass=NullPool)
db.execute("SET sql_mode = 'NO_UNSIGNED_SUBTRACTION'")
snps = ", ".join("'" + x + "'" for x in rsid)
query = '''
SELECT
s.name, s.chrom, s.chromStart, s.chromEnd
FROM
snp146 s
WHERE
s.name IN ( ''' + snps + ''')
'''
rows = db.execute(query)
metadata = pd.DataFrame(rows.fetchall())
metadata.columns = rows.keys()
metadata = metadata.rename(columns={"name":"rsid"})
return metadata
def fast_fetch_metadata(rsid, save=None):
# parallel metadata query
cores = mp.cpu_count()
pool = mp.Pool(cores)
    rsid_split = np.array_split(rsid, cores)
    # Assumed completion of the truncated original, mirroring fast_fetch_seq above;
    # the save handling is an assumption.
    metadata = pd.concat(pool.map(fetch_metadata, rsid_split))
    pool.close()
    pool.join()
    if save is not None:
        metadata.to_csv(save, index=False)
    return metadata
import pandas as pd
import numpy as np
from multiprocessing import Pool
from functools import partial
from primacy.utils import (
get_json_obj, write_json_obj,
convert_dict_to_dataframe,
get_primer_dataframe)
def min_max_scale_clipped(values, v_min, v_max):
scaled = np.divide(np.subtract(values, v_min), np.subtract(v_max, v_min))
scaled[scaled > 1] = 1
return scaled
def get_distance(values, range_min, range_max):
dist = np.zeros(len(values))
above = np.where(values > range_max)
below = np.where(values < range_min)
dist[above] = np.abs(np.subtract(values[above], range_max))
    dist[below] = np.abs(np.subtract(range_min, values[below]))
    return dist  # assumed return of the truncated original
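# Quick worked example (added note) for the two helpers above:
#   values = np.array([0.5, 2.0, 5.0, 9.0])
#   min_max_scale_clipped(values, 0, 4)  -> [0.125, 0.5, 1.0, 1.0]   (clipped at 1)
#   get_distance(values, 1.0, 4.0)       -> [0.5, 0.0, 1.0, 5.0]     (0 inside the range)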
'''Generalized analytic slice sampling (GASS) for truncated multivariate normal
priors.
Author: <NAME> (co-figured out with <NAME>)
Date: May 2019
'''
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from fast_mvn import sample_mvn
def gass(x, Sigma, loglikelihood, Constraints,
cur_ll=None, mu=None, verbose=False, ll_args=None,
sparse=False, precision=False, chol_factor=False, Q_shape=None,
ngrid=100):
# Current log-likelihood
if cur_ll is None:
cur_ll = loglikelihood(x, ll_args)
# Select slice height
ll = cur_ll + np.log(np.random.random())
# Sample proposal
v = sample_mvn(Sigma, mu=np.zeros_like(x), sparse=sparse, precision=precision, chol_factor=chol_factor, Q_shape=Q_shape)
# Mean of the gaussian
if mu is None:
mu = np.zeros_like(x)
# Constraint matrix should have the inequality at the last column
assert Constraints.shape[1] == mu.shape[0]+1
# x must be a valid starting point
assert np.all(Constraints[:,:-1].dot(x) >= Constraints[:,-1]), 'Invalid starting point!\n{}\nConstraints:\n{}'.format(x, (Constraints[:,:-1].dot(x) - Constraints[:,-1]).min())
# Calculate lower bound constraints on the slice interval range
x0 = x - mu
a = Constraints[:,:-1].dot(x0)
b = Constraints[:,:-1].dot(v)
c = Constraints[:,-1] - Constraints[:,:-1].dot(mu)
sqrt_term = a**2 + b**2 - c**2
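    # Derivation sketch (added note): points on the ellipse are
    # x(theta) = x0*cos(theta) + v*sin(theta) + mu, so each constraint row reduces to
    # a*cos(theta) + b*sin(theta) >= c. Solving the equality with the tangent half-angle
    # substitution t = tan(theta/2) gives
    #   theta = 2*arctan((b +/- sqrt(a^2 + b^2 - c^2)) / (a + c)),
    # which is exactly the theta1/theta2 computed below.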
eps = 1e-6
# Two cases cause the entire ellipse to be valid:
# 1) the sqrt term is less than zero. this implies a**2 + b**2 < c**2 ==> a cos\theta + b sin\theta > d for all \theta
# 2) a = -c. this implies the only place the constraint touches the ellipse is on the extremal point.
# For anything else, some values of the ellipse will be invalid and must be pruned
concerning = (sqrt_term >= 0) & (a != -c)
if np.any(concerning):
denom = a + c
theta1 = 2*np.arctan((b[concerning] + np.sqrt(sqrt_term[concerning])) / denom[concerning])
theta2 = 2*np.arctan((b[concerning] - np.sqrt(sqrt_term[concerning])) / denom[concerning])
# If a^2 < c^2, we care about the complement of the region because the quadratic is convex.
# Otherwise, the quadratic is concave and we care about the interval
complements = a[concerning]**2 < c[concerning]**2
theta1_complements = theta1[complements]
theta1_interval = theta1[~complements]
theta2_complements = theta2[complements]
theta2_interval = theta2[~complements]
# Numerically approximate the intersection of the valid [-pi, pi] regions
grid = np.linspace(-np.pi, np.pi, 10000)
# Complements require the grid to be outside the [min,max] interval
if np.any(complements):
# TODO: vectorize
for t1, t2 in zip(theta1_complements, theta2_complements):
grid = grid[(grid <= min(t1, t2)) | (grid >= max(t1,t2))]
# Intervals require the grid to be inside the [min,max] interval
if np.any(~complements):
theta_order = theta1_interval < theta2_interval
theta_min = (theta_order*theta1_interval + (~theta_order)*theta2_interval).max() + eps
theta_max = (theta_order*theta2_interval + (~theta_order)*theta1_interval).min() - eps
grid = grid[(grid >= theta_min) & (grid <= theta_max)]
else:
# The entire ellipse is valid
grid = np.linspace(-np.pi, np.pi, ngrid)
if verbose > 1:
np.set_printoptions(precision=3, suppress=True, linewidth=250)
print('x: ', x)
print('x-mu: ', x0)
print('v: ', v)
print('mu: ', mu)
print('')
print('Grid points accepted:', grid)
print('Total grid points: {}'.format(len(grid)))
print('thetas:')
for i, a_i, b_i, c_i, comp_i, theta1_i, theta2_i in zip(np.arange(len(concerning))[concerning],
a[concerning],
b[concerning],
c[concerning],
complements,
theta1/np.pi,
theta2/np.pi):
print('{} a: {:.2f} b: {:.2f} c: {:.2f} complement? {} theta1: {:.2f} theta2: {:.2f}'.format(i, a_i, b_i, c_i, comp_i, theta1_i, theta2_i))
if len(grid) == 0:
grid_options = []
if verbose:
import warnings
warnings.warn('No valid slice regions! Bug??')
else:
# Downsample the grid f there are more grid points than specified
if len(grid) > ngrid:
grid = np.random.choice(grid, size=ngrid, replace=False)
# Quasi-Monte Carlo via grid approximation
grid_options = x0[None]*np.cos(grid[:,None]) + v[None]*np.sin(grid[:,None]) + mu[None]
grid_ll = loglikelihood(grid_options, ll_args) # Log-likelihood function should support batching
grid_options = grid_options[grid_ll >= ll]
grid_ll = grid_ll[grid_ll >= ll]
# Uniform selection over the viable grid points
if len(grid_options) > 0:
selected = np.random.choice(len(grid_options))
x = grid_options[selected]
new_ll = grid_ll[selected]
else:
if verbose:
import warnings
warnings.warn('All theta values rejected. Possible bug or theta grid is too coarse.')
        # no acceptable point on the slice; keep the current state
        new_ll = cur_ll
return x, new_ll
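# Usage sketch (added note; everything in this block is illustrative, not part of the
# original module). Draw samples from a 2D standard normal truncated to the box [0, 1]^2.
def _demo_gass(nsteps=200):
    d = 2
    Sigma = np.eye(d)
    # Constraint rows are [normal | offset], encoding A x >= b:
    #   I x >= 0  and  -I x >= -1  together keep x inside the unit box.
    Constraints = np.block([[np.eye(d), np.zeros((d, 1))],
                            [-np.eye(d), -np.ones((d, 1))]])
    loglik = lambda x, args: -0.5 * np.sum(np.square(x), axis=-1)  # supports batched rows
    x = np.full(d, 0.5)  # starting point must satisfy the constraints
    ll = None
    samples = np.zeros((nsteps, d))
    for t in range(nsteps):
        x, ll = gass(x, Sigma, loglik, Constraints, cur_ll=ll)
        samples[t] = x
    return samples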
def benchmarks():
'''Benchmarking GASS vs.
1) naive ESS + rejection sampling
2) logistic ESS + rejection sampling for monotonicity
3) logistic ESS + posterior projection'''
from functionalmf.elliptical_slice import elliptical_slice as ess
from functionalmf.utils import ilogit, pav
from scipy.stats import gamma
np.random.seed(42)
ntrials = 100
nmethods = 5
nobs = 3
sample_sizes = np.array([100, 500, 1000, 5000, 10000], dtype=int)
nsizes = len(sample_sizes)
nburn = nsamples = sample_sizes.max()
verbose = True
mu_prior = np.array([0.95, 0.8, 0.75, 0.5, 0.29, 0.2, 0.17, 0.15, 0.15]) # monotonic curve prior
T = len(mu_prior)
mse = np.zeros((ntrials,nsizes,nmethods))
coverage = np.zeros((ntrials, nsizes, nmethods,T), dtype=bool)
for trial in range(ntrials):
print('Trial {}'.format(trial))
b = 3
min_mu, max_mu = 0.1, 1
sigma_prior = 0.1*np.array([np.exp(-0.5*(i - np.arange(T))**2 / b) for i in range(T)]) # Squared exponential kernel
# Sample the true mean via rejection sampling
mu_truth = np.random.multivariate_normal(mu_prior, sigma_prior)
while mu_truth.min() < min_mu or mu_truth.max() > max_mu or (mu_truth[1:] - mu_truth[:-1]).max() > 0:
mu_truth = np.random.multivariate_normal(mu_prior, sigma_prior)
print(mu_truth)
# Plot some data points using the true scale
data = np.array([np.random.gamma(100, scale=mu_truth) for _ in range(nobs)]).T
samples = np.zeros((nsamples, nmethods, T))
xobs = np.tile(np.arange(T), (nobs, 1)).T
        # Linear constraints requiring monotonicity and [min_mu, max_mu] bounds.
        # Assumed completion of the truncated line: rows are [normal | offset] encoding
        # I x >= min_mu, -I x >= -max_mu, and x[t] - x[t+1] >= 0 (monotone decreasing).
        C_zero = np.concatenate([np.eye(T), np.full((T, 1), min_mu)], axis=1)
        C_one = np.concatenate([-np.eye(T), np.full((T, 1), -max_mu)], axis=1)
        C_mono = np.zeros((T - 1, T + 1))
        C_mono[:, :-1] = np.eye(T - 1, T) - np.eye(T - 1, T, k=1)
        Constraints = np.concatenate([C_zero, C_one, C_mono], axis=0)
import argparse
import numpy as np
from tqdm import tqdm
from astropy.table import Table
from startrail.paths import registration_dir, valid_table, adjust_table
from numpy.fft import fft2, ifft2
from copy import deepcopy
from startrail.api import Survey
from astropy.wcs import WCS
from scipy.signal import correlate
surv = Survey.get_core_survey()
t = Table.read(adjust_table)
PIX2DEG = 7.285e-5
NUMCCDS = 61
def guess(seq_ind, exp_ind):
exp_map = {
1: 0.22175029665231705,
2: 0.3976368308067322,
3: 0.5731573700904846,
4: 0.7405745387077332,
}
return exp_map[exp_ind]
def conv(a,b):
    return np.real(ifft2(fft2(b) * fft2(a)))
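# Sanity sketch (added note, not part of the original file): conv implements circular
# convolution, so convolving with a one-hot kernel circularly shifts the image.
def _demo_conv():
    img = np.random.rand(16, 16)
    kernel = np.zeros((16, 16))
    kernel[3, 5] = 1.0
    assert np.allclose(conv(img, kernel), np.roll(img, (3, 5), axis=(0, 1)))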
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # computed up front so the success branch can report it
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
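# Example (added note; the dates and regex tag are hypothetical, the dataset name is
# taken from M2M_URLs below):
#   data = M2M_Call('CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel',
#                   '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
#   files = M2M_Files(data, tag='.*nc$')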
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
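# --- Editor's sketch: a typical tag for M2M_Files/list_files. The pattern is an
# illustrative assumption that keeps deployment NetCDF files and skips ancillary
# catalog entries.
# files = M2M_Files(data, tag='.*METBK.*\\.nc$')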
def M2M_Data(nclist, variables):
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the first 25 characters (server-specific catalog prefix)
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
        openFile.close()  # release the OPeNDAP handle once the data are copied
    tmp = variables[0].data / 60 / 60 / 24  # seconds since 1900-01-01 -> days
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
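# --- Editor's sketch: the time convention handled above. Raw timestamps are
# seconds since 1900-01-01; dividing by 86400 yields days, which pandas converts
# against an explicit origin. The sample value is illustrative.
_secs = np.array([3786825600.0])   # 43829 days' worth of seconds
_days = _secs / 60 / 60 / 24
# pd.to_datetime(_days, unit='D', origin=pd.Timestamp('1900-01-01')) -> 2020-01-01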
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
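# --- Editor's sketch: the auto-growing access pattern structtype enables, which
# M2M_URLs below leans on. Indexing exactly one element past the end appends a
# fresh var(); this demo is illustrative only.
_demo = structtype()
_demo[0].name = 'time'
_demo[0].units = 'seconds since 1900-01-01'
_demo[1].name = 'sea_surface_temperature'
assert len(_demo) == 2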
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = 'percent'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = 'percent'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = 'percent'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = 'percent'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
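# (pCO2 water: thermistor temperature and partial pressure of CO2 in seawater)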
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
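# (seawater pH sensor: thermistor temperature and pH of seawater)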
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
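# (spectral irradiance: multispectral downwelling irradiance vector)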
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
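# (seafloor pressure: absolute pressure and seawater temperature at the MFN)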
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
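# (moored pumped CTD: temperature, salinity, density, pressure, conductivity)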
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
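# (3-D single-point turbulent velocity meter on the seafloor MFN)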
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
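# (3-D velocity meter on the CE09OSPM wire-following profiler; includes
#  instrument attitude and the co-located CTD pressure)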
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
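# (air-sea pCO2: surface-seawater and atmospheric CO2 partial pressures plus
#  the computed air-sea CO2 flux)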
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
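# (photosynthetically available radiation on the profiler)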
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
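# (optical absorption and attenuation spectrophotometer; these branches record
#  timestamps only -- the spectral channels are not requested)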
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
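# (nitrate: raw and salinity-corrected nitrate concentration from the SUNA)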
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered-host datasets (method == 'RecoveredHost')
#MOPAK
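# (3-axis motion package / accelerometer; these branches record timestamps only)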
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
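# (bulk meteorology package: sea-surface temperature/conductivity/salinity,
#  magnetically corrected winds, barometric pressure, air temperature, humidity,
#  long/shortwave irradiance, precipitation, and derived minute flux products)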
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
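# (three-wavelength fluorometer: chlorophyll-a, CDOM, and optical backscatter)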
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
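# (direct-covariance flux package; this branch records timestamps only)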
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
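# (dissolved oxygen optode: dissolved/estimated oxygen and optode temperature;
#  the MFN variants report only the two oxygen products)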
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
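    # ADCP branches (acoustic Doppler current profiler): earth-referenced velocity
    # components plus instrument attitude (heading/pitch/roll) and the bin depth array.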
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
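    # WAVSS branches: bulk surface-wave statistics (heights, periods, direction, spread)
    # from the buoy-mounted wave sensor's recovered-host statistics stream.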
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
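    # VELPT branches (single-point velocity meter): velocity components in m/s, attitude in
    # deci-degrees, and raw temperature (0.01 degC) and pressure (0.001 dbar) counts.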
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
        # Note: the dcl-parsed stream name is kept below for reference; this branch requests
        # the plain velpt_ab_instrument_recovered stream instead.
        #uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
        uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
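    # PCO2W branches (seawater pCO2 sensor): thermistor temperature in degC and
    # pco2_seawater in uatm from NSIF and MFN recovered-host streams.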
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
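    # PHSEN branches (seawater pH sensor): thermistor temperature in degC and the
    # dimensionless pH of seawater, for both NSIF and MFN nodes.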
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
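    # SPKIR branches (multispectral downwelling irradiance): the downwelling vector packs
    # the instrument's wavelength channels into a single array variable (uW cm-2 nm-1).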
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
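    # PRESF branches (seafloor pressure): absolute seafloor pressure and seawater
    # temperature from the tide-measurement stream on the multi-function node.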
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
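    # CTDBP branches (pumped CTD): temperature, practical salinity, density, pressure,
    # and conductivity for BUOY, NSIF, and MFN nodes.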
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
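    # VEL3D branches (3-D single-point turbulent velocity meter on the multi-function
    # node): turbulent velocity components in m/s plus pressure in raw 0.001 dbar counts.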
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
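# Air-sea pCO2 instrument on the surface buoy; reports partial pressure of
# CO2 in surface seawater and in the overlying atmosphere (uatm) plus the
# derived air-sea CO2 flux (mol m-2 s-1).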
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
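# Spectral absorption/attenuation meter. Only the time coordinate is mapped
# for these streams; the per-wavelength optical channels are not enumerated
# in this lookup.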
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
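# SUNA nitrate sensor (see the suna_* stream names); raw nitrate
# concentration and salinity-corrected nitrate, both in umol/L.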
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
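#CTD
# Recovered-instrument CTDBP streams. The variable names carry a ctdbp_
# prefix (e.g. ctdbp_seawater_temperature), unlike the host-logged streams
# above, but the reported quantities and units are the same.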
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
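#ADCP
# Acoustic Doppler current profiler earth-coordinate velocity streams:
# bin depths in meters, instrument attitude (heading/pitch/roll) in
# deci-degrees, and east/north/up seawater velocities in m/s.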
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
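#ZPLSC
# Bioacoustic sonar echogram streams. Only the time coordinate is mapped
# here; the echogram arrays themselves are not enumerated in this lookup.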
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
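#VELPT
# Single-point velocity meter (Nortek Aquadopp). Raw fields are reported in
# scaled integer units: attitude in deci-degrees, temperature in 0.01 degC,
# and pressure in 0.001 dbar.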
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
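#VEL3D
# Recovered 3-D point-velocity streams: the vel3d_k series comes from the
# wire-following profiler (with co-located CTD pressure in dbar); the
# vel3d_c series below comes from the seafloor (MFN) instruments.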
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
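#PRESF
# Seafloor pressure (tide) recorder: tide pressure in dbar and temperature
# in degC.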
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
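#PHSEN
# Seawater pH sensor: thermistor temperature in degC and the computed pH
# of seawater (dimensionless).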
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
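#PCO2W
# In-water pCO2 sensor: thermistor temperature in degC and the partial
# pressure of CO2 in seawater in uatm.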
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
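    #note: the double underscore in 'parad_k__stc_imodem' below is copied verbatim from the uFrame stream name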
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
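    #NUTNR (nitrate) Recovered Instrument Data below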
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
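    #DOSTA/DOFST dissolved oxygen Data below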
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
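    #WAVSS_Stats wave statistics below are served from the ADCPT-M log9 recovered streams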
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
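    #Cabled Benthic Experiment Package (BEP) Streamed Data below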
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    #CSPP (Coastal Surface-Piercing Profiler) Data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
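    #Shelf CSPP (CE02SHSP and CE07SHSP) Data below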
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
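    #Glider (CE05MOAS) Data below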
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
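# Coastal glider DOSTA (dissolved oxygen) branches: oxygen concentration in
# umol/L and umol/kg, interpolated CTD pressure, and position. Note the
# RecoveredHost stream name ends in '_glider_recovered' rather than
# '_glider_instrument'.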
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
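# Coastal glider FLORT (fluorometer/optical backscatter) branches: seawater
# scattering coefficient, chlorophyll, CDOM, volume scattering (bb), optical
# backscatter, interpolated CTD pressure, and position for each glider and
# method, following the same per-glider pattern as the CTD and DOSTA blocks.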
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
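# ---------------------------------------------------------------------
# CE05MOAS glider PARAD (parad_m_glider) streams, telemetered and
# recovered_host, for gliders GL386 through GL247. Each branch exposes
# PAR plus the interpolated CTD pressure and position.
# ---------------------------------------------------------------------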
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
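# ---------------------------------------------------------------------
# CE05MOAS glider ADCP (adcp_velocity_glider) streams. Only the
# recovered_host method is defined for the glider-mounted ADCPs below;
# there are no telemetered ADCP branches in this chain.
#
# Every branch above follows the same shape: set uframe_dataset_name,
# then a per-variable name / empty np.array([]) placeholder / units
# triple. A minimal, hypothetical sketch of a table-driven alternative
# (the names STREAM_TABLE and fill_var_list are illustrative, not part
# of this module), shown for the CEGL386 telemetered PARAD entry:
#
#   STREAM_TABLE = {
#       ('CEGL386', 'GLIDER', 'PARAD', 'Telemetered'): (
#           'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument',
#           [('time', 'seconds since 1900-01-01'),
#            ('parad_m_par', 'umol photons m-2 s-1'),
#            ('int_ctd_pressure', 'dbar'),
#            ('lat', 'degree_north'),
#            ('lon', 'degree_east')],
#       ),
#   }
#
#   def fill_var_list(key, var_list):
#       # Look up the stream, then populate name/data/units in one pass.
#       uframe_dataset_name, fields = STREAM_TABLE[key]
#       for var, (name, units) in zip(var_list, fields):
#           var.name, var.data, var.units = name, np.array([]), units
#       return uframe_dataset_name
# ---------------------------------------------------------------------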
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
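# ---------------------------------------------------------------------
# Surface-mooring buoy METBK1-hr (metbk_hourly) streams: hourly bulk
# meteorological flux products (rain rate, buoyancy/heat/momentum
# fluxes, 2 m humidity and air temperature, skin temperature, 10 m
# wind, net shortwave) for CE02SHSM, CE07SHSM, CE04OSSM, and CE09OSSM.
# ---------------------------------------------------------------------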
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
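#Coastal Endurance WAVSS Mean Directional Wave Data Streams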
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
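#Coastal Endurance WAVSS Non-Directional Wave Data Streams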
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
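#Coastal Endurance WAVSS Buoy Motion Data Streams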
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
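#Coastal Endurance WAVSS Fourier Coefficient Data Streams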
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
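#Coastal Endurance Cabled Profiler (CE04OSPS/CE04OSPD) Data Streams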
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
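#CE04OSPS 200m Platform Data Streams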
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also applies to the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
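#Note: each branch in this dispatch follows the same pattern -- set
#uframe_dataset_name to the UFrame stream path, then initialize parallel
#var_list entries: .name holds the stream parameter name, .data starts as
#an empty np.array (presumably filled in once the data request returns),
#and .units holds the label used downstream.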
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
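# Illustrative usage (a sketch, not executable at this point in the chain):
# once one of the branches in this lookup matches, the populated
# uframe_dataset_name and var_list are consumed by the request/assembly code
# elsewhere in this script, roughly along the lines of:
#
#     for var in var_list:
#         print(var.name, var.units, var.data.shape)
#
# The exact return and consumption mechanics live in the enclosing function
# and are assumed here for illustration only.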
#WAVSS
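# The WAVSS branches below cover five data products from the surface-buoy
# wave sensor: bulk statistics (WAVSS_Stats), mean directional spectra
# (WAVSS_MeanDir), non-directional spectra (WAVSS_NonDir), buoy motion time
# series (WAVSS_Motion), and directional Fourier coefficients
# (WAVSS_Fourier), each in Telemetered and RecoveredHost variants.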
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'degrees'
var_list[11].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A (telemetered)
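# The PCO2A branches record the partial pressure of CO2 in surface seawater
# and in the overlying atmosphere (uatm), plus the derived air-sea CO2 flux
# (mol m-2 s-1), for the CP01CNSM, CP03ISSM, and CP04OSSM surface buoys.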
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A (recovered_host)
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
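# The FDCHP (direct covariance flux) branches currently map only the time
# coordinate; additional flux variables could be appended following the same
# var_list pattern used above, but none are wired in here.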
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
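#METBK-hourly
# Hourly bulk-flux products derived from the METBK packages: rain rate,
# buoyancy/heat/latent/momentum/net-longwave/rain/sensible fluxes, 2-m
# specific humidity and air temperature, skin temperature, 10-m wind, and
# hourly net shortwave irradiance, keyed on 'met_timeflx' rather than 'time'.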
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
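#CTD
# Mooring CTD branches. Note the naming difference between data paths:
# telemetered and recovered_host streams use 'temp', 'pressure', and
# 'conductivity', while recovered_inst streams use the instrument-native
# 'ctdbp_seawater_temperature', 'ctdbp_seawater_pressure', and
# 'ctdbp_seawater_conductivity'.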
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
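    # --- OPTAA (optical absorption/attenuation meter) on the Pioneer surface moorings.
    # Only the time coordinate is mapped for these streams; the spectral channels are
    # not requested here.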
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
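    # --- VELPT (single-point velocity meter) on the NSIF (near-surface instrument frame).
    # Nine variables per stream: time, east/north/up velocities, heading/roll/pitch,
    # temperature, and pressure.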
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
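    # --- FLORT (three-wavelength fluorometer) on the NSIF: scattering coefficient,
    # chlorophyll-a, CDOM, total volume scattering, and optical backscatter.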
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
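    # --- SPKIR (downwelling spectral irradiance) on the NSIF; the downwelling vector
    # is a single array variable covering the instrument's wavelength channels.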
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
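    # --- DOSTA (dissolved-oxygen optode) on the NSIF: time, dissolved oxygen (umol/kg),
    # estimated oxygen concentration, optode temperature, and temperature-corrected oxygen.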
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
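    # --- PHSEN (seawater pH) streams: thermistor temperature and pH, first for the
    # NSIF (RID26) ports and then for the seafloor MFN (MFD35) ports.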
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
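    # --- PCO2W (water-side pCO2) on the MFN: thermistor temperature and pCO2 in uatm.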
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
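    # --- PRESF (seafloor pressure) on the MFN. Note the recovered_inst streams use the
    # presf_tide_* parameter names, while telemetered/recovered_host streams use
    # abs_seafloor_pressure and seawater_temperature.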
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
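    # --- VELPT on the seafloor MFN (MFD35); same nine-variable layout as the NSIF
    # VELPT blocks above.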
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD35/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD35/04-VELPTB000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
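#Coastal Pioneer Surface Mooring MFN Dissolved Oxygen (DOSTA)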
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
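#Coastal Pioneer Surface Mooring MFN Bio-acoustic Sonar (ZPLSC)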
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
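#Coastal Pioneer Surface Mooring MFN ADCP (earth-referenced velocities)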
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/MFD35/01-ADCPTF000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/MFD35/01-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#Coastal Pioneer Wire-Following Profilers (WFP)
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSPM/SBS11/02-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP04OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP04OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
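#CP01CNPM Profiler Mooring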
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP01CNPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP01CNPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
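#CP02PMCI Profiler Mooring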
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
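#CP02PMCO Profiler Mooring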
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMCO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMCO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMCO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
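#CP02PMUI Profiler Mooring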
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUI/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUI' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUI/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
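#CP02PMUO Profiler Mooring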
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP02PMUO/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP02PMUO' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP02PMUO/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CP03ISPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CP03ISPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
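# Assumed continuation (not in the original snippet): the recovered_wfp VEL3D
# block presumably mirrors the telemetered VEL3D block above, so the remaining
# data arrays and units would be:
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'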
import numpy as np
import sklearn
def discrete_entropy(ys):
"""Compute discrete mutual information."""
num_factors = ys.shape[0]
h = np.zeros(num_factors)
for j in range(num_factors):
h[j] = sklearn.metrics.mutual_info_score(ys[j, :], ys[j, :])
return h
def discrete_mutual_info(mus, ys):
"""Compute discrete mutual information."""
num_codes = mus.shape[0]
num_factors = ys.shape[0]
m = np.zeros([num_codes, num_factors])
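# Assumed completion (not part of the original snippet): mirroring the usual
# implementation of this metric utility, the matrix is filled with the mutual
# information between each latent code and each ground-truth factor.
for i in range(num_codes):
    for j in range(num_factors):
        m[i, j] = sklearn.metrics.mutual_info_score(ys[j, :], mus[i, :])
return m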
import unittest
import sys
import bottlechest as bn
import numpy as np
import scipy.sparse as sp
class TestContingency(unittest.TestCase):
def test_1d_int(self):
data = np.array([0, 1, 1, 2, 1])
bb = [0, 1, 1, 0, 0]
for b in [bb, np.array(bb, dtype=np.int8), np.array(bb, dtype=float)]:
counts, nans = bn.contingency(data, b, 2, 1)
np.testing.assert_almost_equal(counts, [[1, 1, 1], [0, 2, 0]])
np.testing.assert_almost_equal(nans, np.zeros(2))
def test_1d_float(self):
nan = float("nan")
data = np.array([0, 1, nan, 2, 1], dtype=float)
bb = [0, 1, 1, 0, 0]
for b in [bb, np.array(bb, dtype=np.int8), np.array(bb, dtype=float)]:
counts, nans = bn.contingency(data, b, 2, 1)
np.testing.assert_almost_equal(counts, [[1, 1, 1], [0, 1, 0]])
np.testing.assert_almost_equal(nans, [0, 1])
def test_1d_mask_int(self):
data = np.array([0, 1, 1, 2, 1])
bb = [0, 1, 1, 0, 0]
counts, nans = bn.contingency(data, bb, 2, 1, mask=[1])
np.testing.assert_almost_equal(counts, [[1, 1, 1], [0, 2, 0]])
np.testing.assert_almost_equal(nans, [0, 0])
counts, nans = bn.contingency(data, bb, 2, 1, mask=[0])
np.testing.assert_almost_equal(counts, np.zeros((2, 3)))
np.testing.assert_almost_equal(nans, [0, 0])
def test_1d_mask_float(self):
nan = float("nan")
data = np.array([0, 1, nan, 2, 1], dtype=float)
bb = [0, 1, 1, 0, 0]
counts, nans = bn.contingency(data, bb, 2, 1, mask=[1])
np.testing.assert_almost_equal(counts, [[1, 1, 1], [0, 1, 0]])
np.testing.assert_almost_equal(nans, [0, 1])
counts, nans = bn.contingency(data, bb, 2, 1, mask=[0])
np.testing.assert_almost_equal(counts, np.zeros((2, 3)))
np.testing.assert_almost_equal(nans, [0, 0])
def test_1d_weighted_int(self):
data = np.array([0, 1, 1, 2, 1])
bb = [0, 1, 1, 0, 0]
counts, nans = bn.contingency(data, bb, 2, 1, weights=[1, 2, 3, 4, 5])
np.testing.assert_almost_equal(counts, [[1, 5, 3], [0, 3, 0]])
np.testing.assert_almost_equal(nans, np.zeros(2))
def test_1d_weighted_float(self):
nan = float("nan")
data = np.array([0, 1, nan, 2, 1], dtype=float)
bb = [0, 1, 1, 0, 0]
for b in [bb, np.array(bb, dtype=np.int8), np.array(bb, dtype=float)]:
counts, nans = bn.contingency(data, b, 2, 1, weights=[1, 2, 3, 4, 5])
np.testing.assert_almost_equal(counts, [[1, 5, 4], [0, 2, 0]])
np.testing.assert_almost_equal(nans, [0, 3])
def test_simple_int(self):
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, 0, 1],
[0, 0, 3, 0, 0]], dtype=int)
for b in [
np.array([1, 0, 1], dtype=np.int8),
np.array([1, 0, 1], dtype=float),
[1, 0, 1]]:
counts, nans = bn.contingency(data, b, 3, 1)
np.testing.assert_almost_equal(counts[0], [[0, 1, 0, 0], [2, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 1, 0, 0], [0, 1, 0, 1]])
np.testing.assert_almost_equal(counts[3], [[1, 0, 0, 0], [1, 0, 1, 0]])
np.testing.assert_almost_equal(counts[4], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(nans, np.zeros((5, 2)))
def test_simple_float(self):
nan = float("nan")
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, nan, 1],
[0, 0, 3, nan, nan]], dtype=float)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1)
np.testing.assert_almost_equal(counts[0], [[0, 1, 0, 0], [2, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 1, 0, 0], [0, 1, 0, 1]])
np.testing.assert_almost_equal(counts[3], [[0, 0, 0, 0], [0, 0, 1, 0]])
np.testing.assert_almost_equal(counts[4], [[0, 1, 0, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(nans, [[0, 0], [0, 0], [0, 0], [1, 1], [0, 1]])
def test_weighted_int(self):
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, 0, 1],
[0, 0, 3, 0, 0]], dtype=int)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1, weights=[1, 2, 3])
np.testing.assert_almost_equal(counts[0], [[0, 2, 0, 0], [4, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 2, 0, 0], [0, 1, 0, 3]])
np.testing.assert_almost_equal(counts[3], [[2, 0, 0, 0], [3, 0, 1, 0]])
np.testing.assert_almost_equal(counts[4], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(nans, np.zeros((5, 2)))
def test_weighted_float(self):
nan = float("nan")
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, nan, 1],
[0, 0, 3, nan, nan]], dtype=float)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1, weights=[1, 2, 3])
np.testing.assert_almost_equal(counts[0], [[0, 2, 0, 0], [4, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 2, 0, 0], [0, 1, 0, 3]])
np.testing.assert_almost_equal(counts[3], [[0, 0, 0, 0], [0, 0, 1, 0]])
np.testing.assert_almost_equal(counts[4], [[0, 2, 0, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(nans, [[0, 0], [0, 0], [0, 0], [2, 3], [0, 3]])
def test_mask_int(self):
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, 0, 1],
[0, 0, 3, 0, 0]], dtype=int)
for b in [
np.array([1, 0, 1], dtype=np.int8),
np.array([1, 0, 1], dtype=float),
[1, 0, 1]]:
counts, nans = bn.contingency(data, b, 3, 1, mask=[1, 1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 1, 0, 0], [2, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[4], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(nans, np.zeros((5, 2)))
def test_mask_float(self):
nan = float("nan")
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, nan, 1],
[0, 0, 3, nan, nan]], dtype=float)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1, mask=[1, 1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 1, 0, 0], [2, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 1, 0, 0], [1, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[4], [[0, 1, 0, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(nans, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 1]])
def test_mask_weighted_int(self):
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, 0, 1],
[0, 0, 3, 0, 0]], dtype=int)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1,
weights=[1, 2, 3], mask=[1, 1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 2, 0, 0], [4, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[4], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(nans, np.zeros((5, 2)))
def test_mask_weighted_float(self):
nan = float("nan")
data = np.array([[0, 1, 1, 2, 1],
[1, 1, 1, nan, 1],
[0, 0, 3, nan, nan]], dtype=float)
counts, nans = bn.contingency(data, [1, 0, 1], 3, 1,
weights=[1, 2, 3], mask=[1, 1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 2, 0, 0], [4, 0, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 2, 0, 0], [3, 1, 0, 0]])
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[4], [[0, 2, 0, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(nans, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 3]])
def test_sparse_int(self):
data = np.array([1, 1, 2, 2, 1, 3])
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1)
np.testing.assert_almost_equal(counts[0], [[0, 0, 1, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 0, 0, 0], [0, 2, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 0, 0, 0], [0, 0, 1, 1]])
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(nans, np.zeros((4, 2)))
def test_sparse_float(self):
data = np.array([1, 1, 2, 2, 1, 3], dtype=float)
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1)
np.testing.assert_almost_equal(counts[0], [[0, 0, 1, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 0, 0, 0], [0, 2, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 0, 0, 0], [0, 0, 1, 1]])
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
np.testing.assert_almost_equal(nans, np.zeros((4, 2)))
def test_sparse_weight_int(self):
data = np.array([1, 1, 2, 2, 1, 3])
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1, weights=[1, 2, 3])
np.testing.assert_almost_equal(counts[0], [[0, 0, 2, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 0, 0, 0], [0, 4, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 0, 0, 0], [0, 0, 1, 3]])
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
def test_sparse_weight_float(self):
data = np.array([1, 1, 2, 2, 1, 3], dtype=float)
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1, weights=[1, 2, 3])
np.testing.assert_almost_equal(counts[0], [[0, 0, 2, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], [[0, 0, 0, 0], [0, 4, 0, 0]])
np.testing.assert_almost_equal(counts[2], [[0, 0, 0, 0], [0, 0, 1, 3]])
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
def test_sparse_mask_int(self):
data = np.array([1, 1, 2, 2, 1, 3])
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1, mask=[1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 0, 1, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
def test_sparse_mask_float(self):
data = np.array([1, 1, 2, 2, 1, 3], dtype=float)
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1, mask=[1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 0, 1, 0], [0, 1, 0, 0]])
np.testing.assert_almost_equal(counts[1], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))
def test_sparse_mask_weight_int(self):
data = np.array([1, 1, 2, 2, 1, 3])
indptr = [0, 3, 4, 6]
indices = [0, 1, 2, 0, 1, 2]
a = sp.csr_matrix((data, indices, indptr), shape=(3, 4))
counts, nans = bn.contingency(a, [1, 0, 1], 3, 1,
weights=[1, 2, 3], mask=[1, 0, 0, 1])
np.testing.assert_almost_equal(counts[0], [[0, 0, 2, 0], [0, 1, 0, 0]])
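# Assumed continuation (not in the original snippet), mirroring
# test_sparse_mask_int above: the masked-out columns stay zero.
np.testing.assert_almost_equal(counts[1], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[2], np.zeros((2, 4)))
np.testing.assert_almost_equal(counts[3], np.zeros((2, 4)))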
'''
Mon Jan 1 11:50:27 MST 2018
<NAME>
This script estimates the black hole puncture parameters for initial data.
The script implements the method outlined in Section IV of
<NAME> and <NAME>, "Simple method to set up low eccentricity
initial data for moving puncture simulations," Phys. Rev. D 83,
024012 (2011).
The Post-Newtonian equations are from
<NAME>, "Coalescing binary systems of compact objects to
(post)^{5/2}-Newtonian order. V. Spin effects,"
Phys. Rev. D 52 821 (1995).
'''
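# Hypothetical invocation (script name assumed), using the command-line
# options defined below:
#   python punctures.py 12.0 -M 1.0 -r 1.5 --s1 "0.5, 0.0, 0.0"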
import argparse
import numpy as np
from math import *
import sys
# Set default values for the Gravitational constant and the speed of light.
G = 1.0
c = 1.0
def normal_vector(v):
'''
Return v normalized to unit length. If v is (numerically) the zero vector, return the unit vector zhat = [0, 0, 1].
'''
vmag = np.linalg.norm(v)
if vmag > 1.0e-9:
n = v / vmag
else:
n = np.array([0,0,1])
return n
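# Example: normal_vector(np.array([3.0, 0.0, 4.0])) returns array([0.6, 0. , 0.8]),
# while normal_vector(np.zeros(3)) falls back to the unit z-vector [0, 0, 1].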
#----------------------------------------------------------------------
# Set up commandline arguments
#----------------------------------------------------------------------
zd = sqrt(2.0) * 1.0e-5
parser = argparse.ArgumentParser(description='Estimate parameters for puncture initial data.')
#parser.add_argument('-o','--order', type=str, default="2",
# help='Post-Newtonian order of the equations',
# choices=["0","1","2","3"])
#parser.add_argument('--m1', type=float, default=1.0,
# help='Mass of star 1 (Default 1.0)', metavar='NUM')
#parser.add_argument('--m2', type=float, default=1.0,
# help='Mass of star 2 (Default 1.0)', metavar='NUM')
parser.add_argument('separation', type=float,
help='separation of the binary star system', metavar = 'SEPARATION')
parser.add_argument('-M', '--total_mass', type=float, default=1.0,
help='Total ADM Mass (Default 1.0)', metavar='NUM')
parser.add_argument('-r', '--mass_ratio', type=float, default=1.0,
help='Mass ratio r = m1/m2, r >= 1 (Default 1.0)', metavar='NUM')
parser.add_argument('--s1', type=str, default="0, 0, 0",
help='BH1 spin "spin parameter, theta, phi". (Default "0,0,0")',
metavar='"NUM, NUM, NUM"')
parser.add_argument('--s2', type=str, default="0, 0, 0",
help='BH2 spin "spin parameter, theta, phi". (Default "0,0,0")',
metavar='"NUM, NUM, NUM"')
parser.add_argument('-z', '--zoffset', type=float, default=zd,
help='z coordinate of bh plane (Default sqrt(2)*1e-5)', metavar='NUM')
args = parser.parse_args()
d = args.separation
M = args.total_mass
mr = args.mass_ratio
zoffset = args.zoffset
if d < 6.0:
print('separation between black holes too small. d >= 6.0')
sys.exit()
# Calculate the ADM masses of each black hole, m1 and m2.
m2 = M/(mr + 1.0)
m1 = M - m2
mu = m1 * m2 / M
nu = mu / M
GMD = G * M / d
# Calculate the bare masses.
# These are used for puncture initial data, but the ADM masses are used
# in the PN expressions. The expression for the bare masses is derived
# from Eq. (22) of Tichy and Marronetti.
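# (Note, consistent with the quadratics solved below: these expressions follow
# from inverting the puncture ADM mass estimates m1 = mb1*(1 + mb2/(2*d)) and
# m2 = mb2*(1 + mb1/(2*d)) for the bare masses mb1 and mb2.)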
mb1 = 0.5*(m1 - m2 - 2.0*d + sqrt( 8.0*m1*d + (m2 - m1 + 2.0*d)**2))
mb2 = 0.5*(m2 - m1 - 2.0*d + sqrt( 8.0*m1*d + (m2 - m1 + 2.0*d)**2))
#
# The spin parameters are spin magnitude, theta, phi. Theta, phi are the
# standard spherical coordinate angles. Use this to construct the spin vectors
# S1 and S2 in Cartesian coordinates.
#
s1pars = np.fromstring(args.s1,count=3,sep=',')
s2pars = np.fromstring(args.s2,count=3,sep=',')
xi1 = s1pars[0]
theta1 = s1pars[1]
phi1 = s1pars[2]
xi2 = s2pars[0]
theta2 = s2pars[1]
phi2 = s2pars[2]
S1norm = xi1 * m1**2
S2norm = xi2 * m2**2
S1 = [ S1norm * sin(theta1) * cos(phi1), S1norm * sin(theta1) * sin(phi1), S1norm * cos(theta1) ]
S2 = [ S2norm * sin(theta2) * cos(phi2), S2norm * sin(theta2) * sin(phi2), S2norm * cos(theta2) ]
'''
Calculate the tangential momentum for the zero-spin case using Eq. (45) from
<NAME>, <NAME>, and <NAME>, "Numerical black hole initial
data with low eccentricity based on post-Newtonian orbital parameters,"
arXiv:0901.0993v3 [gr-qc] 2009.
This is now deprecated.
if args.order == "0":
ptns = mu * sqrt(GMD)
elif args.order == "1":
ptns = mu * (sqrt(GMD) + 1.0/c**2 * GMD**1.5)
elif args.order == "2":
ptns = mu * (sqrt(GMD) + 1.0/c**2 * GMD**1.5 +
1.0/(16.0 * c**4) * (42 - 43 * nu) * GMD**2.5)
elif args.order == "3":
ptns = mu * (sqrt(GMD) + 1.0/c**2 * GMD**1.5 +
1.0/(16.0 * c**4) * (42 - 43 * nu) * GMD**2.5 +
1.0/(128.0*c**6) * (480.0 + (163*pi**2 - 4556)*nu +
104*nu**2)*GMD**3.5)
else:
print('Unknown PN order = ' + args.order)
'''
#
# Calculate the tangential momentum following Tichy & Marronetti.
# Set the Newtonian angular momentum (LNewt) to be along the z-axis.
#
LNewt = np.array([0,0,1])
#
# S1N and S2N are \hat{\bf s}_A in Kidder.
#
S1N = normal_vector(S1)
S2N = normal_vector(S2)
#
# Calculate the PN2 angular momentum L from Eqs. (4.7) and (2.8) in Kidder.
#
# The closing spin-spin factor below is reconstructed from Kidder Eq. (4.7);
# the snippet was truncated mid-expression at this point.
T0 = mu*sqrt(M*d)*(1.0 + 2.0*GMD - 0.25*(xi1*np.dot(LNewt,S1N)*(8.0*(m1/M)**2 + 7.0*nu) + xi2*np.dot(LNewt,S2N)*(8.0*(m2/M)**2 + 7.0*nu))*GMD**(1.5) + (0.5*(5.0 - 9.0*nu) - 0.75*nu*xi1*xi2*(np.dot(S1N,S2N) - 3.0*np.dot(LNewt, S1N)*np.dot(LNewt, S2N)))*GMD**2)
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 10:00:16 2018
@author: DaniJ
This module is supossed to contain the algorithms and information of Chemical speciation plus sorption.
It is a daughter of Database_SC but it can be used without a database.
[If feasible (question of time), I will keep it apart]
"""
from Database_SC import Database_SC
import numpy as np
from scipy import linalg
import scipy.integrate as integrate
from scipy import optimize
#import scipy as sp
class ChemSys_Surf (Database_SC):
'''
ChemSys_Surf is a daughter class of Database_SC, which is in turn a daughter class of Database. Hence, it depends on their parameters.
#Note for myself and other contributors: if you add or delete properties or methods of the class, document it here. Otherwise, it is a little chaos (speaking from my own experience)
properties:
Faraday_constant
temperature
dielectric_constant
permittivity_free_space
A_activitypar
B_activitypar
universal_gas_constant
ionic_strength_constant
fix_ionic_strength
S
S_electro
names_elec_sorpt
length_names_elec_sorpt
U
A_Borkovec
B_Borkovec
A_Borkovec_columns
A_Borkovec_rows
aq_u_vector
waterdensity
index_related_sorpt_pri
methods:
set_S
set_vector_aqueous_component_value
set_names_electrostatic_variables
set_electro_sorption_stoichiometric_M
set_universal_gas_constant
set_Faraday_constant
set_temperature
set_dielectric_constant
set_constant_ionic_strength
set_permittivity_free_space
calculate_dielectric_constant
calculate_A_activitypar
calculate_B_activitypar
calculate_ionic_strength
calculate_waterdensity
calculate_u_electro
define_system_from_input_and_database
create_S
create_U
remove_electro_mass_from_U
separte_S_into_S1_and_S2
create_electro_sorption_stoichiometric_M
create_stoichiometric_surfacepotential
search_index_list_classlist
search_index_list_listdictionaryreactions
instantiation_step
speciation_Westall1980_CCM # NOTE --> probably speciation_Westall1980_CCM and speciation_Westall1980_TLM can be unified in one algorithm; so far they are kept separate.
speciation_Westall1980_TLM #
create_sorpt_vec
Boltzman_factor_2_psi
Jacobian_Speciation_Westall1980
print_speciation
speciation_Borkovec_1983_DLM
get_z_vector
calculate_log_activity_coefficient_aq_pri_species
calculate_log_activity_coefficient_aq_sec_species
NOTE: Remark that ChemSys_Surf is a daughter class of Database_SC. Therefore, in order to create the pseudo S matrix (the stoichiometric matrix that does not contain the surface potential as unknown), methods like ...
... set_names_aq_primary_species (names_aq_pri_sp), set_names_aq_secondary_species (names_aq_sec_sp), set_names_sorpt_primary_species (names_sorpt_pri_sp), set_names_sorpt_secondary_species (names_sorpt_sec_sp), set_aq_list_pri_class (list_aq_pri_sp), ...
... set_aq_list_sec_class (list_aq_sec_sp) can and must be used. However, it has to be checked that the given input is in accordance with the system itself; that can be done by ???????????
'''
# Constructor
def __init__(self):
self.Faraday_constant = 96485.3328959 # C/mol
self.temperature = (273.15+25) # It is assumed that initially we are at T=25°C and at atmospheric pressure, for the dielectric and other constants
self.universal_gas_constant = 8.314472 # J/(K*mol)
self.permittivity_free_space = 8.854187871e-12 # Farads: F/m = C^2/(J*m). Also called vacuum permittivity, the electric constant, or the distributed capacitance of the vacuum
self.calculate_dielectric_constant()
self.calculate_waterdensity()
self.calculate_A_activitypar()
self.calculate_B_activitypar()
self.ionic_strength_constant = False
pass
# Instantiation of main attributes
def define_system_from_input_and_database (self, database, n_aq_prim, list_aq_val, name_sorpt_pri, List_pri_sorpt_class = None):
'''
Given a database, the list of aqueous primary species, the list of aqueous values for the components associated to the primary species, and the list of sorption primary species,
the system is defined.
Optionally, List_pri_sorpt_class is given to update some species; name_sorpt_pri[i] == List_pri_sorpt_class[i].name for every i.
'''
# check that list_sorpt_pri is coherent with List_pri_sorpt_class
assert len(n_aq_prim) == len(list_aq_val), \
"The length of the aqueous primary species and the aqueous component values is not equal."
if List_pri_sorpt_class is not None:
assert len(name_sorpt_pri) == len(List_pri_sorpt_class), \
"The length of the sorption primary species and the sorption list classes is not equal."
for i in range(0, len(name_sorpt_pri)):
assert i == name_sorpt_pri.index(List_pri_sorpt_class[i].name), 'The name or order of the list of names of sorption primary species and the list of classes of sorption primary species is not coherent.'
# Instantiation of main attributes (Although not necessary, it is useful to keep sense)
names_aq_pri_sp = n_aq_prim
names_aq_sec_sp = []
list_aq_pri_sp = []
list_aq_sec_sp = []
list_aq_reactions = []
names_sorpt_pri_sp = name_sorpt_pri
names_sorpt_sec_sp = []
if List_pri_sorpt_class is not None:
list_sorpt_pri_sp = List_pri_sorpt_class
else:
list_sorpt_pri_sp = []
list_sorpt_sec_sp = []
list_sorpt_reactions = []
# Drawn the list_aq_pri_sp & list_sorpt_pri_sp(if necessary) from Database
index_list_pri_aq = self.search_index_list_classlist (names_aq_pri_sp, database.names_aq_pri_sp)
for i in index_list_pri_aq:
list_aq_pri_sp.append(database.list_aq_pri_sp[i])
if List_pri_sorpt_class is None:
index_list_sorpt = self.search_index_list_classlist (names_sorpt_pri_sp, database.names_sorpt_pri_sp)
for i in index_list_sorpt:
list_sorpt_pri_sp.append(database.list_sorpt_pri_sp[i])
# Obtain list_aq_reactions, list_aq_sec_sp and names_aq_sec_sp from names_aq_pri_sp
index_aq_reactions, names_aq_sec_sp = self.search_index_list_listdictionaryreactions (names_aq_pri_sp, database.list_aq_reactions)
index_list_sec_aq = self.search_index_list_classlist (names_aq_sec_sp, database.names_aq_sec_sp)
for i in index_list_sec_aq:
list_aq_sec_sp.append(database.list_aq_sec_sp[i])
for i in index_aq_reactions:
list_aq_reactions.append(database.list_aq_reactions[i])
# Obtain list_sorpt_reactions, list_sorpt_sec_sp and names_sorpt_sec_sp from names_aq_pri_sp + names_aq_sec_sp + names_sorpt_pri_sp
index_sorpt_reactions, names_sorpt_sec_sp = self.search_index_list_listdictionaryreactions (names_aq_pri_sp + names_aq_sec_sp + names_sorpt_pri_sp, database.list_sorpt_reactions)
index_list_sec_sorpt = self.search_index_list_classlist (names_sorpt_sec_sp, database.names_sorpt_sec_sp)
for i in index_list_sec_sorpt:
list_sorpt_sec_sp.append(database.list_sorpt_sec_sp[i])
for i in index_sorpt_reactions:
list_sorpt_reactions.append(database.list_sorpt_reactions[i])
# Instantiation of main variables, hence definition of system to study
self.set_names_aq_primary_species (names_aq_pri_sp)
self.set_names_aq_secondary_species (names_aq_sec_sp)
self.set_names_sorpt_primary_species ( names_sorpt_pri_sp)
self.set_names_sorpt_secondary_species (names_sorpt_sec_sp)
self.set_aq_list_pri_class (list_aq_pri_sp)
self.set_aq_list_sec_class (list_aq_sec_sp)
self.set_sorpt_list_pri_class (list_sorpt_pri_sp)
self.set_sorpt_list_sec_class (list_sorpt_sec_sp)
self.set_aq_reactions_list (list_aq_reactions)
self.set_sorpt_reactions_list (list_sorpt_reactions)
self.set_vector_aqueous_component_value(list_aq_val)
def set_constant_ionic_strength (self, givenvalue):
'''
set the ionic_strength to a given value
'''
self.ionic_strength_constant = True
self.fix_ionic_strength = givenvalue
# Matrix_Creation_From_Database
def create_S (self):
# First we create the pseudoS matrix (if it does not exist) which has the following structure:
# Number_aqueous_primary_sp Number_sorption_primary_sp Number_aqueous_secondary_sp Number_sorption_secondary_sp
# n_aqueousR1 | |
# pseudoS = nRn | |
# n_sorptionR1 | Stoichiometric values |
# nRm | |
#
#
# Remark: pseudoS is a matrix that is almost the sorption stoichiometric matrix.
# The order of the columns is given by the Number_aqueous_primary_sp + Number_sorption_primary_sp + Number_aqueous_secondary_sp + Number_sorption_secondary_sp
# The order of the rows is first number of aqueous reactions followed by the number of the sorption reactions.
if not hasattr(self, 'pseudoS'):
self.create_pseudo_S()
# Now the electrostatic variables must be added. These variables are treated as chemical species. They will be introduced between Number_sorption_primary_sp and Number_aqueous_secondary_sp.
#
# Each primary sorption class should have an attribute called type_sorption. The attribute will determine the number of surface potential variables that must be added to the stoichiometric matrix.
# -CCM will add only one.
#
#
# For the number of rows: reactions that are aqueous have 0 as stoichiometric value. The stoichiometric values for the added surface potential species are obtained from the type of sorption and from the stoichiometric value and the charge.
if not hasattr(self, 'S_electro') or not hasattr(self, 'pseudoS_length_rows'):
self.create_electro_sorption_stoichiometric_M ()
# defining length and names of columns
self.S_names_columns = self.names_aq_pri_sp + self.names_sorpt_pri_sp + self.names_elec_sorpt + self.names_aq_sec_sp + self.names_sorpt_sec_sp
self.S_length_columns = len(self.pseudoS_names_columns) + len(self.names_elec_sorpt)
# defining length of rows
self.S_length_rows = len(self.list_aq_reactions) + len(self.list_sorpt_reactions)
pseudo_S = self.pseudoS.copy()
S_electro = self.S_electro.copy()
pos_1 = self.length_aq_pri_sp + self.length_sorpt_pri_sp
S = np.concatenate((np.concatenate ((pseudo_S[:,:pos_1], S_electro), axis = 1), pseudo_S[:,pos_1:]), axis = 1)
assert self.S_length_rows == S.shape[0]
assert self.S_length_columns == S.shape[1]
self.S = S
# Creation of the Component matrix, [Westall does not really make a difference between stoichiometric matrix and U matrix, since somehow they are related]
def create_U (self):
if not hasattr(self, 'S'):
self.create_S ()
S1, S2 = self.separte_S_into_S1_and_S2()
npri = self.length_aq_pri_sp +self.length_sorpt_pri_sp + self.length_names_elec_sorpt
I = np.identity(npri)
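# U is built as [ I | -S1^T * inv(S2^T) ] so that U * S^T = 0, i.e. the
# components are conserved across every reaction (mole-balance condition).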
Stop = -np.matmul(S1.transpose(), linalg.inv(S2.transpose()))
U = np.concatenate((I, Stop), axis=1)
U = self.remove_electro_mass_from_U (U)
self.U = U
# remove_electro_mass_from_U ()
def remove_electro_mass_from_U (self, U):
'''
This method should be used only inside create_U, not outside it.
'''
npri = self.length_aq_pri_sp +self.length_sorpt_pri_sp
for i in range(0, self.length_names_elec_sorpt):
U[npri, npri] = 0
npri += 1
return U
# Separate matrix from Primary and Secondary species
def separte_S_into_S1_and_S2 (self):
'''
Separates primary and Secondary species matrices.
e.g.:
Sp1 Sp1 Sp2
R1 || x11 x12 x13 || || x11 x12 || || x11 ||
S = R2 || x21 x22 x23 || in to S1 = || x21 x22 || and S2= || x21 ||
R3 || x31 x32 x33 || || x31 x32 || || x32 ||
'''
n_pri = self.length_aq_pri_sp + self.length_sorpt_pri_sp + len(self.names_elec_sorpt)  # renamed from 'np' to avoid shadowing the numpy import
S1 = self.S[:, 0:n_pri].copy()
S2 = self.S[:, n_pri:].copy()
return S1, S2
# The stoichiometric matrix derived from sorption species.
def create_electro_sorption_stoichiometric_M (self):
'''
The function assumes that some variables are already defined
'''
# create list of new boltzman surface potential variables from sorption species
self.names_elec_sorpt = []
self.index_related_sorpt_pri = []
for i in range(0,self.length_sorpt_pri_sp):
if hasattr(self.list_sorpt_pri_sp[i], 'type_relation'): # related species should be defined in the list_sorpt_pri_sp after the leading species.
self.index_related_sorpt_pri.append(self.names_sorpt_pri_sp.index(self.list_sorpt_pri_sp[i].type_relation))
elif isinstance(self.list_sorpt_pri_sp[i].names_Boltz_psi, str):
self.names_elec_sorpt.append(self.list_sorpt_pri_sp[i].names_Boltz_psi)
elif isinstance(self.list_sorpt_pri_sp[i].names_Boltz_psi, list):
for j in range(0, len(self.list_sorpt_pri_sp[i].names_Boltz_psi)):
self.names_elec_sorpt.append(self.list_sorpt_pri_sp[i].names_Boltz_psi[j])
self.length_names_elec_sorpt = len(self.names_elec_sorpt)
# Block
if not hasattr(self, 'pseudoS_length_rows'):
# self.pseudoS_length_rows = len(self.list_aq_reactions) + len(self.list_sorpt_reactions)
self.pseudoS_length_rows = self.length_aq_sec_sp + self.length_sorpt_sec_sp
S_electro = np.zeros((self.pseudoS_length_rows, self.length_names_elec_sorpt))
col_position = 0
track_dict = {}
counter = 0
for i in range(0, self.length_sorpt_pri_sp):
if hasattr(self.list_sorpt_pri_sp[i], 'type_relation'): # related species should be defined in the list_sorpt_pri_sp after the leading species.
sub_B = self.create_stoichiometric_surfacepotential (self.names_sorpt_pri_sp[i], self.list_sorpt_pri_sp[self.index_related_sorpt_pri[counter]].type_sorption)
ind_start = track_dict['start_'+ self.names_sorpt_pri_sp[self.index_related_sorpt_pri[counter]]]
ind_end =track_dict['end_'+ self.names_sorpt_pri_sp[self.index_related_sorpt_pri[counter]]]
if len(sub_B.shape) == 1:
S_electro[:, ind_start:ind_end] = S_electro[:, ind_start:ind_end] + sub_B.reshape(sub_B.shape[0],1)
else:
S_electro[:, ind_start:ind_end] = S_electro[:, ind_start:ind_end] + sub_B
counter += 1
else:
sub_B = self.create_stoichiometric_surfacepotential (self.names_sorpt_pri_sp[i], self.list_sorpt_pri_sp[i].type_sorption)
if len(sub_B.shape) == 1:
S_electro[:, col_position] = sub_B
track_dict['start_'+self.names_sorpt_pri_sp[i]] = col_position
col_position += 1
track_dict['end_'+self.names_sorpt_pri_sp[i]] = col_position
elif len(sub_B.shape) == 2:
old_col_position = col_position
col_position = col_position + sub_B.shape[1]
S_electro[:, old_col_position:col_position] = sub_B
track_dict['start_'+self.names_sorpt_pri_sp[i]] = old_col_position
track_dict['end_'+self.names_sorpt_pri_sp[i]] = col_position
self.S_electro = S_electro
# creates stoichiometric blocks
def create_stoichiometric_surfacepotential (self, name_pri_sp, type_sorpt):
'''
'''
if type_sorpt == 'CCM' or type_sorpt == 'DLM':
d = np.zeros((self.length_aq_sec_sp + self.length_sorpt_sec_sp))
for i in range(0, self.length_sorpt_sec_sp):
if self.list_sorpt_reactions[i].is_species_in_reaction (name_pri_sp):
names_species_in_reaction = [*self.list_sorpt_reactions[i].reaction]
summ_charges_times_stoichiometric = 0
for j in names_species_in_reaction:
if j in self.names_aq_pri_sp:
z = self.list_aq_pri_sp[self.names_aq_pri_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
summ_charges_times_stoichiometric = summ_charges_times_stoichiometric + (n*z)
elif j in self.names_aq_sec_sp:
z = self.list_aq_sec_sp[self.names_aq_sec_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
summ_charges_times_stoichiometric = summ_charges_times_stoichiometric + (n*z)
d[self.length_aq_sec_sp + i] = summ_charges_times_stoichiometric
elif type_sorpt == 'TLM':
d = np.zeros(((self.length_aq_sec_sp + self.length_sorpt_sec_sp), 3))
for i in range(0, self.length_sorpt_sec_sp):
if self.list_sorpt_reactions[i].is_species_in_reaction (name_pri_sp):
names_species_in_reaction = [*self.list_sorpt_reactions[i].reaction]
summ_charges_times_stoichiometric_o = 0
summ_charges_times_stoichiometric_b = 0
for j in names_species_in_reaction:
if j in self.names_aq_pri_sp:
z = self.list_aq_pri_sp[self.names_aq_pri_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
if j =='H+' or j == 'OH-':
summ_charges_times_stoichiometric_o = summ_charges_times_stoichiometric_o + (n*z)
else:
summ_charges_times_stoichiometric_b = summ_charges_times_stoichiometric_b + (n*z)
elif j in self.names_aq_sec_sp:
z = self.list_aq_sec_sp[self.names_aq_sec_sp.index(j)].charge
n = self.list_sorpt_reactions[i].reaction[j]
if j =='H+' or j == 'OH-':
summ_charges_times_stoichiometric_o = summ_charges_times_stoichiometric_o + (n*z)
else:
summ_charges_times_stoichiometric_b = summ_charges_times_stoichiometric_b + (n*z)
d[self.length_aq_sec_sp + i, 0] = summ_charges_times_stoichiometric_o
d[self.length_aq_sec_sp + i, 1] = summ_charges_times_stoichiometric_b
return d
def get_z_vector(self):
z =[]
for i in range(0, self.length_aq_pri_sp):
# if type(self.list_aq_pri_sp[i]) == Aq_Species:
z.append(self.list_aq_pri_sp[i].charge)
for i in range(0, self.length_aq_sec_sp):
z.append(self.list_aq_sec_sp[i].charge)
return z
def search_index_list_classlist (self, list1, list2):
'''
The function returns a list of indices of the position of list1 in list2. --> E.g. list1 =[a c], list2 = [a b c d] function returns listindices = [1,3]
Precondition1: list1 <= list2
Precondition2: list1 is completely included in list2. Otherwise an error occurs
'''
assert len(list1) <= len(list2), "List of species in the chemical system must be equal to or smaller than the list of primary species in the database"
list_indices = []
for i in list1:
# appends the index of the list2 that coincide with list1.
list_indices.append(list2.index(i))
return list_indices
def search_index_list_listdictionaryreactions (self, list1, list_dictionaries):
'''
The function returns two lists: one with the indices of the reactions that occur in the ChemSys_Surf according to the given dictionary, and another with the secondary species of each reaction.
Both lists are in agreement, e.g. l_ind_reaction = [0, 4, 6, 9], l_secondary_species = ['A', 'B', 'C', 'F']: from reaction 0 of the database the secondary species obtained is A, from reaction 6 it is C, and so on.
'''
index_reactions = []
name_aq_sec_sp = []
for i in range(0, len(list_dictionaries)):
temp_dict = list_dictionaries[i]
temp_dict_list_keys = list(temp_dict.reaction.keys())
n_s = 0
for j in temp_dict_list_keys:
count = list1.count(j)
if count != 1 and count != 0:
raise ValueError('[ChemSys class, method Index_ReactionsinDatabase] It seems that the name_primary_species property is wrong.')
elif count == 0:
n_s += 1
n_s_name = j
if n_s == 1:
index_reactions.append(i)
name_aq_sec_sp.append(n_s_name)
return index_reactions, name_aq_sec_sp
# Creating first pseudoS
#Setters
# set stoichiometric Matrix
def set_S (self, S, names_species_columns):
self.S = S
self.S_length_rows = S.shape[0]
self.S_length_columns = S.shape[1]
self.S_names_columns = names_species_columns
assert len(names_species_columns) == self.S_length_columns, 'The columns must have the same size that the list of strings containing the name of the species.'
# aqueous component vector
def set_vector_aqueous_component_value(self, list_aq_val):
'''
The values of the vector of aqueous component totals.
'''
self.aq_u_vector = list_aq_val
# set names_electrostatic_variables
def set_names_electrostatic_variables (self, names_elsctrostatic_var):
'''
The names of the electrostatic potentials that must be taken into account.
Preferably define them using create_electro_sorption_stoichiometric_M,
since the names_elsctrostatic_var and their number should in general be related to a surface.
'''
self.names_elec_sorpt = names_elsctrostatic_var
self.length_names_elec_sorpt = len(self.names_elec_sorpt)
# set the stoichiometric matrix given by
def set_electro_sorption_stoichiometric_M (self, S_electro):
'''
The S matrix defined with the surface potential variables as columns and the reactions as rows.
Preferably define it using create_electro_sorption_stoichiometric_M
'''
self.S_electro = S_electro
# Faraday constant
def set_Faraday_constant (self, new_value):
'''
The Faraday constant is instantiated with the class. The Faraday constant has the value 96485.33289(59) C mol^-1 [obtained from WIKI: https://en.wikipedia.org/wiki/Faraday_constant]
The constant is the relationship between the elementary charge, i.e. the magnitude of the charge of an electron ['e'], and the Avogadro constant (the number of particles in a mol) [NA]
F = e * NA
e ≈ 1.60217662×10^-19 C
NA ≈ 6.02214086×10^23 mol^-1
Note of one of the authors: I do not think that it should be modified, but maybe someone wants to play with the value
'''
self.Faraday_constant = new_value
# Temperature
def set_temperature(self, new_T):
'''
Temperature is supposed to be given in kelvins.
'''
self.temperature = new_T
# Universal gas constant
def set_universal_gas_constant (self, r_value):
'''
Set the universal gas constant
'''
self.universal_gas_constant = r_value
# dielectric constant
def set_dielectric_constant (self, e_c):
'''
Set the dielectric constant of water
'''
self.dielectric_constant = e_c
def set_permittivity_free_space (self, eo):
'''
Set the permittivity of free space (also called vacuum permittivity, the electric constant, or the distributed capacitance of the vacuum).
Not recommended to be used unless you are sure of what you are doing.
'''
self.permittivity_free_space = eo
# Calculations
# Dielectric constant of water
def calculate_dielectric_constant(self):
'''
Calculates the dielectric constant
The extra calculations are based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
self.dielectric_constant = 2727.586 + 0.6224107*self.temperature - 466.9151*np.log(self.temperature) - (52000.87/self.temperature)
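# Sanity check: at T = 298.15 K this expression gives ~78.4, the standard
# relative permittivity of water at 25 degC.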
def calculate_A_activitypar (self):
'''
Calculates the parameter A of the Debye-Hückel equation
The units are supposed to be kg^(1/2)/mol^(1/2)
Actually, if you want it in L/mol, it is possible to divide by the square of the density to obtain such a value
The extra calculations are based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
A = 1.82483e6*np.sqrt(self.waterdensity)
B = (self.temperature*self.dielectric_constant)**(3/2)
self.A_activitypar = A/B
def calculate_B_activitypar (self):
'''
Calculates the parameter B of the Debye-Hückel equation
The units are supposed to be kg^(1/2)/mol^(1/2)*cm
Actually, if you want it in L/mol, it is possible to divide by the square of the density to obtain such a value
The extra calculations are based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
Here the equation is a bit different than that given in the book. The book takes the equation from
"Theoretical prediction of the thermodynamic behavior of aqueous electrolytes at high pressures and temperatures; II, Debye-Hückel parameters for activity coefficients and relative partial molal properties"
The difference is a factor of 10^-8 and is related to the fact that they use angstroms instead of cm
'''
A = 50.29158649e8*np.sqrt(self.waterdensity)
B = np.sqrt(self.temperature*self.dielectric_constant)
self.B_activitypar = A/B
def calculate_waterdensity (self):
'''
Calculates the density of the water
The extra calculations are based on the book section 1.1.2.6 Calculation of activity coefficient -- Groundwater Geochemistry --- <NAME>, <NAME>
'''
Tc = self.temperature - 273.15
A = (Tc-3.9863)**2
B = Tc + 288.9414
C = Tc + 68.12963
D = (A*B)/(508929.2*C)
E = 0.011445*np.exp(-374.3/Tc)
self.waterdensity = 1 - D + E
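# Sanity check: at 25 degC this evaluates to ~0.997, matching the tabulated
# density of water in g/cm3.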
############################################################################
##### instantiation_step ()
#####
#############################################################################
def instantiation_step (self, type_I=1):
'''
'''
if type_I == 1:
c_ini = np.ones(self.S_length_columns)*1e-3
return c_ini
############################################################################################################################################################
################# Speciation and related algorithms ########################################################################################################
############################################################################################################################################################
#
def speciation_Westall1980_CCM (self, tolerance = 1e-6, max_iterations = 100, c_guess = None):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
pages 37-39
'''
# instantiation of unknowns
if np.any(c_guess == None):
c_guess = self.instantiation_step (type_I = 1)
c_n =c_guess
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
S1, S2 = self.separte_S_into_S1_and_S2()
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
# instantiation variables for loop
counter_iterations = 0
err = tolerance + 1
while err>tolerance and counter_iterations < max_iterations:
# Calculate u_electro and the T vector [if I am not wrong, T_sigma must be calculated at every step, since it depends somehow on the surface potential, which is unknown]
u_electro = self.calculate_u_electro(c_n[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
# Calculate f or better said in this specific case Y
Y = self.U.dot(c_n) - T
# Calculate Z
Z = self.Jacobian_Speciation_Westall1980(c_n, pos_start_elec, pos_end_elec)
# Calculating the diff, Delta_X
# In the paper Delta_X is X_old - X_new, or as they call it, X_original - X_improved.
# I am writing X_new - X_old, hence I use -Y instead of Y.
delta_X = linalg.solve(Z,-Y)
# The error will be equal to the maximum increment
err = max(abs(delta_X))
# Relaxation factor borrowed from <NAME> to avoid negative values
max_1 = 1
max_2 =np.amax(-2*np.multiply(delta_X, 1/c_n[0:pos_end_elec]))
Max_f = np.amax([max_1, max_2])
Del_mul = 1/Max_f
# Update
c_n[0:pos_end_elec] = c_n[0:pos_end_elec] + Del_mul*delta_X # Update primary species
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(c_n[0:pos_end_elec]))) # Update secondary
c_n[pos_end_elec:] =10**log_c2
counter_iterations += 1
if counter_iterations >= max_iterations:
raise ValueError('Max number of iterations surpassed.')
self.c = c_n
return c_n
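# Hypothetical usage sketch (assumes the system and the U matrix have already
# been defined, e.g. via define_system_from_input_and_database and create_U):
#   c = chem_sys.speciation_Westall1980_CCM(tolerance=1e-8, max_iterations=200)
#   chem_sys.print_speciation()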
def speciation_Westall1980_CCM_v2 (self, tolerance = 1e-6, max_iterations = 100, x = None):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
pages 37-39
'''
# scipy.optimize.newton(func, x0, fprime=None, args=(), tol=1.48e-08, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True)[source]
S1, S2 = self.separte_S_into_S1_and_S2()
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
#c_pri = optimize.newton(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
c_pri = optimize.fsolve(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(c_pri))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((c_pri, c2))
self.c = c_n
return c_n
def func_newton (self, x, T_chem, pos_start_elec, pos_end_elec, S1, S2):
'''
x is the vector of primary species
'''
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(x))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((x, c2))
u_electro = self.calculate_u_electro(x[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
Y = self.U.dot(c_n) - T
return Y
def Jacobian_Speciation_Westall1980_func (self, x, T_chem, pos_start_elec, pos_end_elec, S1, S2):
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(x))) # Update secondary
c2 =10**log_c2
c_n = np.concatenate ((x, c2))
return self.Jacobian_Speciation_Westall1980(c_n, pos_start_elec, pos_end_elec)
def speciation_Westall1980_v3 (self, tolerance = 1e-6, max_iterations = 100, Ln_x = None, activity_b = False):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
ages 37-to-39.
That is the third version, here we will try to work with ln(X) as primary species instead of X. Such thing have an effect in the formulation.
Specifically, the Newton-Rapshon jacobian of the system should become symetric (I am not taking into account activity, not sure if using activity and its derivatives the matrix is still symetric)
The activity_b is just a boolean that if true, the speciaiton of the secondary species in is done by substitution of
'''
# scipy.optimize.newton(func, x0, fprime=None, args=(), tol=1.48e-08, maxiter=50, fprime2=None, x1=None, rtol=0.0, full_output=False, disp=True)[source]
S1, S2 = self.separte_S_into_S1_and_S2()
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
lnK = self.log_k_vector/np.log10(np.e) # Changing the base from log_10 to ln (log_e)
#c_pri = optimize.newton(self.func_newton, x, args = (T_chem, pos_start_elec, pos_end_elec, S1, S2), fprime = self.Jacobian_Speciation_Westall1980_func)
ln_c_pri = optimize.fsolve(self.residual_fun_v3, Ln_x, args = (lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b), fprime = self.Jacobian_Residual_fun_v3)
ln_c2 = np.matmul(linalg.inv(S2), lnK - np.matmul(S1, ln_c_pri))
c1 = np.exp(ln_c_pri)
c2 = np.exp(ln_c2)
c_n = np.concatenate ((c1, c2))
self.c = c_n
return c_n
def residual_fun_v3 (self, x, lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b):
'''
This function is not the 3rd version of an older function; it belongs to speciation_Westall1980_v3.
The main algorithm uses the algorithms and formulas that can be found in the Westall paper, but for the unknown variables it relies on ln X instead of X.
The function that must be built is still Y = U*c - T;
what changes is how the c values are obtained. Before, we assumed that our independent variable was a sort of concentration; now the variable is exactly ln X of that sort of concentration.
Hence the equation for c translates into:
c = exp(lnKi+sum(aik*lnX))
but since we are using the stoichiometric matrix the relationship will be
lnC2 = inv(S2)*lnK - inv(S2)*S1*lnX
and c is the concatenation of c = exp(lnX) and exp(lnC2)
'''
if activity_b == False:
c_n = self.speciation_no_activity_v3 (lnK, S1, S2, x)
elif activity_b == True:
c_n = self.speciation_activity_v3 (lnK, S1, S2, x)
c1 = np.exp(x)
u_electro = self.calculate_u_electro(c1[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
Y = self.U.dot(c_n) - T
return Y
def Jacobian_Residual_fun_v3 (self, x, lnK, T_chem, pos_start_elec, pos_end_elec, S1, S2, activity_b):
'''
This function is the Jacobian counterpart of residual_fun_v3; it belongs to speciation_Westall1980_v3.
'''
if activity_b == False:
c_n = self.speciation_no_activity_v3 (lnK, S1, S2, x)
elif activity_b == True:
c_n = self.speciation_activity_v3 (lnK, S1, S2, x)
return self.Jacobian_Speciation_Westall1980_modification_lnX (c_n, pos_start_elec, pos_end_elec)
def speciation_no_activity_v3 (self, lnK, S1, S2, x):
ln_c2 = np.matmul(linalg.inv(S2), lnK - np.matmul(S1, x))
c1 = np.exp(x)
c2 = np.exp(ln_c2)
c_n = np.concatenate ((c1, c2))
return c_n
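# Illustration only (toy numbers, assuming np and linalg are imported as
# elsewhere in this module): the mass-action back-substitution
# ln c2 = inv(S2)*(lnK - S1*lnX) used above, for a single reaction
# A + B <-> AB with log10 K = 3, so [AB] = K*[A]*[B].
def _demo_mass_action_backsub():
    S1_toy = np.array([[-1.0, -1.0]])          # stoichiometry of the primaries
    S2_toy = np.array([[1.0]])                 # stoichiometry of the secondary
    lnK_toy = np.array([3.0 * np.log(10.0)])
    ln_x = np.log(np.array([1e-3, 1e-2]))      # primary concentrations
    ln_c2 = np.matmul(linalg.inv(S2_toy), lnK_toy - np.matmul(S1_toy, ln_x))
    return np.exp(ln_c2)                       # -> [1e-2]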
def speciation_activity_v3 (self, lnK, S1, S2, x):
c_1 = np.exp(x)
c_2 = np.zeros(S2.shape[1])
c_2 = self.subfunction_of_speciation_activity_v3 (c_2, c_1, lnK, S1, S2)
c_2 = optimize.fixed_point(self.subfunction_of_speciation_activity_v3, c_2, args = (c_1, lnK, S1, S2))
#
# tolerance = 1e-8
# n_max_iterations = 100
#error = 1
# I need to implement some sort of Picard method
#c_1 = np.exp(x)
#c_2 = np.zeros(S2.shape[1])
# c_k = self.subfunction_of_speciation_activity_v3 (c_2, c_1, lnK, S1, S2)
#counter = 0
#while error > tolerance and counter < n_max_iterations:
# c_k1 = self.subfunction_of_speciation_activity_v3 (c_k, c_1, lnK, S1, S2)
# error = max(abs(c_k1-c_k))
# print(error)
#c_k = c_k1.copy()
#counter += 1
#if counter >= n_max_iterations:
# raise ValueError('Max number of iterations surpassed in speciation_activity_v3 (self, lnK, S1, S2, x.')
c_n = np.concatenate((c_1, c_2))
return c_n
def subfunction_of_speciation_activity_v3 (self, c_2, c_1, lnK, S1, S2):
c_a_pri = c_1[:self.length_aq_pri_sp]
c_a_sec = c_2[:self.length_aq_sec_sp]
ionic_strength = self.calculate_ionic_strength (np.concatenate((c_a_pri, c_a_sec)))
log_a_coeff_aq_pri_sp = self.calculate_log_activity_coefficient_aq_pri_species (ionic_strength)
a_coeff_aq_pri_sp = 10**(log_a_coeff_aq_pri_sp)
log_a_coeff_aq_sec_sp = self.calculate_log_activity_coefficient_aq_sec_species (ionic_strength)
a_coeff_aq_sec_sp = 10**(log_a_coeff_aq_sec_sp)
if 'H2O' in self.names_aq_pri_sp:
ind = self.names_aq_pri_sp.index('H2O')
c_a_pri_t = np.delete(c_a_pri, ind)
a_coeff_aq_pri_sp [ind] = 1-(0.018*np.sum(np.concatenate ((c_a_pri_t, c_a_sec))))
elif 'H2O' in self.names_aq_sec_sp:
ind = self.names_aq_sec_sp.index('H2O')
c_a_sec_t = np.delete(c_a_sec, ind)
a_coeff_aq_sec_sp [ind] = 1-(0.018*np.sum(np.concatenate ((c_a_pri, c_a_sec_t))))
c_1[:self.length_aq_pri_sp] = c_1[:self.length_aq_pri_sp]*a_coeff_aq_pri_sp
ln_c1_a1 = np.log(c_1)
ln_c2_a2 = np.matmul(linalg.inv(S2), lnK - np.matmul(S1, ln_c1_a1))
ln_c2_a2[:self.length_aq_sec_sp] = ln_c2_a2[:self.length_aq_sec_sp] - np.log(a_coeff_aq_sec_sp)
c_2 = np.exp(ln_c2_a2)
# print(c_2)  # debug output
return c_2
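# Illustration only: the Picard iteration sketched in the comments above is
# delegated to scipy.optimize.fixed_point. A minimal standalone example
# (assuming np and optimize are imported as elsewhere in this module):
# solve c = exp(-c), whose fixed point is ~0.5671.
def _demo_fixed_point():
    return optimize.fixed_point(lambda c: np.exp(-c), 0.5)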
def Jacobian_Speciation_Westall1980_modification_lnX (self, C, n_aq_plus_n_sorpt, n_primaryspecies):
'''
The jacobian matrix following an implementation based on the algorithm of Westall (1980)
"Chemical equilibrium Including Adsorption on Charged Surfaces"
Pages 37-to-39
It is assumed that C is order first with the primary species and then with the secondary species such as C = [C1 C2]
This function is identical to Jacobian_Speciation_Westall1980 but it has been modified considering the lnX the unknown variable.
That means that the derivation of the residual function for the Newton-Raphson process is done by lnC1 (or LnX) and not C1 (or X)
primary function:
zjk = sum(aij*aik*Ci/Xk) becomes now zjk = sum(aij*aik*Ci)
For CCM:
z_psipsi = sum(aij*aipsi*Ci/Xpsi) + (s*a*C*R*T)/(F*F*Xpsi)
becomes now
z_psipsi = sum(aij*aipsi*Ci) + (s*a*C*R*T)/(F*F)
For TLM:
'''
# The first part treats all terms as it was a normal speciation
Z = np.zeros((n_primaryspecies, n_primaryspecies))
for i in range(0, n_primaryspecies):
for j in range(0, n_primaryspecies):
Z[i,j]= np.matmul(np.multiply(self.U[i,:], self.U[j,:]), C)
# According to point 2 of Table III of Westall, the term (C*s*a/F)*(R*T/(F*X_unknown)) must be added to the electrostatic part
# I am supposing here that all the sorption phases are CCM
for i in range(0, self.length_sorpt_pri_sp):
pos_unknown_vector = n_aq_plus_n_sorpt
# I am supposing here that the sorption phases are CCM
if self.list_sorpt_pri_sp[i].type_sorption == 'CCM':
D1 = self.universal_gas_constant*self.temperature
D2 = self.Faraday_constant
F = ((self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant)
Z[pos_unknown_vector,pos_unknown_vector] = Z[pos_unknown_vector, pos_unknown_vector] + (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
pos_unknown_vector += 1
# I am supposing here that the sorption phases are TLM
elif self.list_sorpt_pri_sp[i].type_sorption == 'TLM':
D1 = self.universal_gas_constant*self.temperature
D2 = self.Faraday_constant
D3 = self.Faraday_constant
D4 = self.Faraday_constant
F = ((self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant)
# O-plane
# plane 0 - 0
Z[pos_unknown_vector,pos_unknown_vector] = Z[pos_unknown_vector, pos_unknown_vector] + (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
# plane 0 - b
Z[pos_unknown_vector,pos_unknown_vector+1] = Z[pos_unknown_vector, pos_unknown_vector+1] - (self.list_sorpt_pri_sp[i].C1*F)*(D1/D3)
# plane 0 - d
# plane b - 0
Z[pos_unknown_vector + 1,pos_unknown_vector] = Z[pos_unknown_vector + 1,pos_unknown_vector] - (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
# plane b - b
Z[pos_unknown_vector + 1,pos_unknown_vector + 1] = Z[pos_unknown_vector + 1,pos_unknown_vector + 1] + ((self.list_sorpt_pri_sp[i].C1+self.list_sorpt_pri_sp[i].C2)*F)*(D1/D3)
# plane b - d
Z[pos_unknown_vector + 1,pos_unknown_vector + 2] = Z[pos_unknown_vector + 1,pos_unknown_vector + 2] - (self.list_sorpt_pri_sp[i].C2*F)*(D1/D4)
# plane d - 0
# plane d - b
Z[pos_unknown_vector + 2,pos_unknown_vector + 1] = Z[pos_unknown_vector + 2,pos_unknown_vector + 1] - (self.list_sorpt_pri_sp[i].C2*F)*(D1/D3)
# plane d - d
# The commented part below corresponds to the paper, which appears to be wrongly derived; it can be deleted once all parts agree.
# A = -F/(2*R*T)
#param = self.Faraday_constant/(2*(self.universal_gas_constant*self.temperature))
#A = -param
#
#pos_C = self.length_aq_pri_sp+self.length_sorpt_pri_sp+self.length_names_elec_sorpt
#C_aq = np.concatenate((C[:self.length_aq_pri_sp], C[pos_C : (pos_C + self.length_aq_sec_sp)]))
#
#I = self.calculate_ionic_strength(C_aq)
#B = np.sqrt(8*self.permittivity_free_space*self.dielectric_constant*self.universal_gas_constant*self.temperature*I)
#psi_d = self.Boltzman_factor_2_psi(C[pos_unknown_vector+2])
#par_C = param*psi_d
#C = np.cosh(par_C)
#F_d = A*B*C
#Z[pos_unknown_vector + 2,pos_unknown_vector + 2] = F_d + (self.list_sorpt_pri_sp[i].C2*F)*(D1/D4)
pos_C = self.length_aq_pri_sp+self.length_sorpt_pri_sp+self.length_names_elec_sorpt
C_aq = np.concatenate((C[:self.length_aq_pri_sp], C[pos_C : (pos_C + self.length_aq_sec_sp)]))
I = self.calculate_ionic_strength(C_aq)
B = np.sqrt(8*self.permittivity_free_space*self.dielectric_constant*self.universal_gas_constant*self.temperature*I)
B_half = B/2
C = np.cosh(-np.log(C[pos_unknown_vector+2])/2)
F_d = C*B_half
Z[pos_unknown_vector + 2,pos_unknown_vector + 2] = F_d + (self.list_sorpt_pri_sp[i].C2*F)*(D1/D4)
pos_unknown_vector +=3
return Z
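# Illustration only: why the ln X formulation is attractive. The chemical block
# of the Jacobian above, Z[j,k] = sum_i U[j,i]*U[k,i]*C[i], equals U*diag(C)*U^T
# and is therefore symmetric by construction (random toy check, np assumed):
def _demo_lnX_jacobian_is_symmetric():
    U_toy = np.random.rand(3, 5)
    C_toy = np.random.rand(5)
    Z_toy = np.matmul(U_toy * C_toy, U_toy.T)   # U diag(C) U^T
    return np.allclose(Z_toy, Z_toy.T)          # -> True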
def speciation_Westall1980_TLM (self, tolerance = 1e-6, max_iterations = 100, c_guess = None):
'''
Implementation of the algorithm given in "Chemical Equilibrium Including Adsorption on Charged Surfaces" Westall, 1980
Pages 37-39
'''
# instantiation of unknowns
if c_guess is None:
c_guess = self.instantiation_step (type_I = 1)
c_n = c_guess
pos_start_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp
pos_end_elec = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
S1, S2 = self.separte_S_into_S1_and_S2()
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
# instantation variables loop
counter_iterations = 0
err = tolerance + 1
while err>tolerance and counter_iterations < max_iterations:
# Calculate U vector [If I am not wrong, T_sigma must be calculated at every step, since it depends somehow on the surface potential, and it is unknown]
u_electro = self.calculate_u_electro(c_n[pos_start_elec:pos_end_elec], c_n)
T = np.concatenate ((T_chem, u_electro))
# Calculate f or better said in this specific case Y
Y = self.U.dot(c_n) - T
# Calculate Z
Z = self.Jacobian_Speciation_Westall1980(c_n, pos_start_elec, pos_end_elec)
# Calculating the diff, Delta_X
# In the paper Delta_X is X_old - X_new or, as they call it, X_original - X_improved.
# I am writing X_new - X_old, hence I use -Y instead of Y.
delta_X = linalg.solve(Z,-Y)
#delta_X = sp.sparse.linalg.gmres(Z,-Y)
#delta_X = delta_X[0]
#print(delta_X)
# The error will be equal to the maximum increment
err = max(abs(delta_X))
# print(err)  # debug output
# Relaxation factor borrowed from <NAME> to avoid negative values
max_1 = 1
max_2 =np.amax(-2*np.multiply(delta_X, 1/c_n[0:pos_end_elec]))
Max_f = np.amax([max_1, max_2])
Del_mul = 1/Max_f
# Update
c_n[0:pos_end_elec] = c_n[0:pos_end_elec] + Del_mul*delta_X # Update primary species
log_c2 = np.matmul(linalg.inv(S2), self.log_k_vector - np.matmul(S1, np.log10(c_n[0:pos_end_elec]))) # Update secondary
c_n[pos_end_elec:] =10**log_c2
counter_iterations += 1
if counter_iterations >= max_iterations:
raise ValueError('Max number of iterations surpassed.')
self.c = c_n
return c_n
def speciation_Borkovec_1983_DLM (self, tolerance = 1e-6, max_iterations = 100, c_guess = None, A_Borkovec = None, names_col = None, names_row = None ):
'''
Implementation of the algorithm given in "Solution of the Poisson-Boltzmann equation for surface excesses of ions in the diffuse layer at the oxide-electrolyte interface", Borkovec 1983.
Some parts of this algorithm are not clear to me; hence I will try to implement it as it is given in the paper.
'''
# modified matrices must be given:
if A_Borkovec is None and not hasattr(self, 'A_Borkovec'):
self.create_A_Borkovec()
A = self.A_Borkovec
if names_col is None:
names_col = self.A_Borkovec_columns
if names_row is None:
names_row = self.A_Borkovec_rows
# The upper part can be expanded to add more outside inputs (Maybe later)
# for equation 20, I need the right K
S1, S2 = self.separte_S_into_S1_and_S2()
l_k_comp = np.matmul(linalg.inv(S2),self.log_k_vector)
K_eqn20_bulk = np.concatenate((np.zeros(self.length_aq_pri_sp), l_k_comp[:self.length_aq_sec_sp]))
K_eqn20_surface = np.concatenate((np.zeros(self.length_sorpt_pri_sp), l_k_comp[self.length_aq_sec_sp:]))
K_eqn20 = np.concatenate((K_eqn20_bulk, K_eqn20_surface))
# Borkovec_1983- QUOTE (pag. 333) : To circumvent these difficulties one can use an iterative procedure consisting of an initial step to establish electroneutrality in the bulk, and then alternately (i) recomputing g with
# the electroneutrality condition fulfilled, and (ii) using the constant values of g in solving the equilibrium problems.
# instantation variables loop
counter_iterations = 0
err = tolerance + 1
'''
Borkovec_1983 - QUOTE (page. 334) --> The initial step is made by treating the asymmetric electrolyte as a symmetric electrolyte of the same ionic strength, using eqn. (16) to evaluate g, and solving the equilibrium problem defined by eqns. (20)
and (22). There is of course no requirement for electroneutrality when evaluating g by eqn. (16). Once the equilibrium problem is solved, albeit with only approximate values of g, the electroneutrality condition in the bulk is fulfilled, and corrected values
of g can be evaluated from eqn. (11)
'''
# The values of the vector X must be instantiated
sorpt_u_vector = self.create_sorpt_vec()
T_chem = np.concatenate ((self.aq_u_vector, sorpt_u_vector))
T = np.concatenate((T_chem, np.zeros(self.length_names_elec_sorpt)))
# concentration of the components: initial instantiation
Xd = 1.1  # <-- This part might be changed by an instantiation function
X = np.concatenate((T_chem, np.array([Xd])))
# initial guess, concentration species
c = K_eqn20 + np.matmul(A,np.log10(X)) # This part here is equation 20
c = 10**c
z_vec = self.get_z_vector()
''' First part according to Borkovec 1983 - page 333-334, solving assuming symmetric electrolyte '''
while err>tolerance and counter_iterations < max_iterations:
I = self.calculate_ionic_strength(c[:self.length_aq_pri_sp + self.length_aq_sec_sp])
# g must be calculated to create the matrix B
g_vec = self.calculate_g_vec_Borkovec_1983_eqn_16(I, X[-1])
# Now that vector g (assuming symmetrical electrolyte) --> I can build the B matrix and find Y
B = self.create_B_Borkovec(A, g_vec)
# Calculating Y. The Y is given in equation 22 in Borkovec(1983)
Y = np.matmul(B.transpose(), c) - T
# Now the jacobian must be created
Z = self.create_jacobian_Borkovec_1983_symm(A, B, c, X, I, z_vec ,g_vec)
delta_X = linalg.solve(Z,-Y)
#print(delta_X)
# The error will be equal to the maximum increment
err = max(abs(delta_X))
# Relaxation factor borrowed from <NAME> to avoid negative values
max_1 = 1
max_2 =np.amax(-2*np.multiply(delta_X, 1/X))
Max_f = np.amax([max_1, max_2])
Del_mul = 1/Max_f
# Update
X = X + Del_mul*delta_X # Update primary species
c = K_eqn20 + np.matmul(A,np.log10(X)) # This part here is equation 20
c = 10**c
counter_iterations += 1
if counter_iterations >= max_iterations:
raise ValueError('Max number of iterations surpassed.')
X_o = X.copy()
c_o = c.copy()
''' Second part, assuming no symmetric electrolyte
This part is, as of today (14/12/2018), not completely clear to me; hence I will see how this approach works.
I notice that calculating g_vec_o does not give the same value as the old g_vec:
DISCUSS IT with Heberling and Luetzenkirchen
'''
g_vec_o = self.calculate_g_vec_Borkovec_1983_eqn_11(z_vec, c_o[:self.length_aq_pri_sp + self.length_aq_sec_sp], X_o[-1]) # Necessary for equation 36 of Borkovec 1983
g_vec_o = np.array(g_vec_o)
dg_dXd_vec_o = self.dg_dXd_vec_eqn_11(z_vec, c_o[:self.length_aq_pri_sp + self.length_aq_sec_sp], X_o[-1]) # Necessary for equation 36 of Borkovec 1983
dg_dXd_vec_o = np.array(dg_dXd_vec_o)
# instantation variables loop
counter_iterations = 0
err = tolerance + 1
while err>tolerance and counter_iterations < max_iterations:
# g must be calculated to create the matrix B
g_vec = g_vec_o + dg_dXd_vec_o*(X[-1]-X_o[-1])
# Now that vector g (assuming asymmetrical electrolyte) --> I can build the B matrix and find Y
B = self.create_B_Borkovec(A, g_vec)
# Calculating Y. The Y is given in equation 22 in Borkovec(1983)
Y = np.matmul(B.transpose(), c) - T
# Now the jacobian must be created
Z = self.create_jacobian_Borkovec_1983_asymm( A, B, c, X, z_vec, g_vec)
delta_X = linalg.solve(Z,-Y)
# The error will be equal to the maximum increment
err = max(abs(delta_X))
# Relaxation factor borrowed from <NAME> to avoid negative values
max_1 = 1
max_2 =np.amax(-2*np.multiply(delta_X, 1/X))
Max_f = np.amax([max_1, max_2])
Del_mul = 1/Max_f
# Update
X = X + Del_mul*delta_X # Update primary species
c = K_eqn20 + np.matmul(A,np.log10(X)) # This part here is equation 20
c = 10**c
counter_iterations += 1
if counter_iterations >= max_iterations:
raise ValueError('Max number of iterations surpassed.')
self.c_Borkovec = c
return c
def dg_dXd_vec_eqn_11(self, z_vec, cb, Xd):
'''
In eqn 36 of Borkovec there is a term evaluated at c_o and Xd_o which is necessary for the calculation of the g factors.
The integration variable appears in the limits of the integral.
The way to develop it can be found here: https://math.stackexchange.com/questions/716596/derivative-of-definite-integral
basically means:
int[b(x), a(x)] f(t) dt = F(a(x), b(x))
dF/dx = (dF/da)*(da/dx) + (dF/db)*(db/dx) = f(a(x))*(da/dx) - f(b(x))*(db/dx)
so for our specific case:
a(Xd) = Xd --> da/dXd = 1
b(Xd) = 1 --> db/dXd = 0
and f(t) will be the integrand of equation 11
'''
dg_dXd = []
if Xd-1 >= 0 :
b = 1
else:
b = -1
sa_F = self.list_sorpt_pri_sp[0].sp_surf_area*(self.list_sorpt_pri_sp[0].solid_concentration_or_grams/self.Faraday_constant)
alpha = self.alpha_Borkovec_1983()
partA = sa_F*b*alpha
for i in range(0, len(z_vec)):
zi = z_vec[i]
partB = self.integrand_fun_Borkovec_1983_eqn_11(Xd, zi, z_vec, cb)
dg_dXd.append(partA*partB)
return dg_dXd
def calculate_g_vec_Borkovec_1983_eqn_11 (self, z_vec, cb, Xd):
'''
This function evaluates equation 11 as stated in Borkovec 1983, provided that the given parameters are correct.
'''
g = []
tol = 1e-4
if Xd-1 >= 0 :
b = 1
else:
b = -1
sa_F = self.list_sorpt_pri_sp[0].sp_surf_area*(self.list_sorpt_pri_sp[0].solid_concentration_or_grams/self.Faraday_constant)
alpha = self.alpha_Borkovec_1983()
partA = sa_F*b*alpha
for i in range(0, len(z_vec)):
zi = z_vec[i]
partB = integrate.quad(self.integrand_fun_Borkovec_1983_eqn_11, 1, Xd, args = (zi,z_vec, cb))
if partB[1] > tol:
raise ValueError('equation 11, integration of integrand high numerical error')
g.append(partA*partB[0])
return g
#def integrand_fun_Borkovec_1983_eqn_11 (self, x, zi,z_vec, cb):
# a = (x**zi)-1
# b= 0
# for i in range(0, len(z_vec)):
# b = b + cb[i]*((x**z_vec[i])-1)
#b = x*x*b
#return a/b
#https://scicomp.stackexchange.com/questions/30715/how-to-cope-with-the-following-singularity?noredirect=1#comment56672_30715
def integrand_fun_Borkovec_1983_eqn_11 (self, x, zi,z_vec, cb):
'''
External help was provided; here is the link (it should perhaps also be checked with a mathematician working in this area):
https://scicomp.stackexchange.com/questions/30715/how-to-cope-with-the-following-singularity?noredirect=1#comment56672_30715
The person who provided the answer is <NAME> from Humboldt-Universität zu Berlin (a mathematician), so I assume it is ok.
'''
a = self.term_integrand_fun_Borkovec_1983_eqn_11 (x, zi)
b= 0
for i in range(0, len(z_vec)):
b = b + cb[i]*self.term_integrand_fun_Borkovec_1983_eqn_11(x, z_vec[i])
b = x*x*b
#b = (1e-20+max(0,b))**0.5
b = abs(b)**0.5
#print(b)
#print(x)
return a/b
def term_integrand_fun_Borkovec_1983_eqn_11(self, X, z):
if abs(X-1)>1e-8:
return X**z-1 # If I am not close to zero I return (X^z)-1
return z*(X-1)*(1+(z-1)*(X-1)/2.0*(1+(z-2)*(X-1)/3.0))
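# Illustration only: rationale for the series branch above. Near X = 1 the
# direct X**z - 1 loses precision to cancellation, which matters inside the
# square-rooted denominator of eqn. (11). Comparison at X = 1 + 1e-12, z = 2:
def _demo_series_vs_direct(X=1.0 + 1e-12, z=2.0):
    direct = X**z - 1.0
    series = z*(X-1)*(1+(z-1)*(X-1)/2.0*(1+(z-2)*(X-1)/3.0))
    return direct, series   # the series value keeps full relative accuracy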
def create_jacobian_Borkovec_1983_asymm (self, A, B, c, X, z_vector, g_vector):
'''
In the appendix (Borkovec 1983) the following is written, I quote:
"(ii) For the case of the asymmetric electrolyte with k != d, we need only the first term of eqn.(A2), since in the iteration procedure we define the gi's to be function of Xd only."
That means that the gi used is the one of equation 36, and hence gi is only a function of Xd. Then the quote continues with:
"For k = d the derivative needed is simply the integrand of eqn. (11) evaluated at Xd"
'''
assert len(z_vector)==len(g_vector), " [create_jacobian_Borkovec_1983_asymm] charge vector and g-factor vector lengths differ. Something must be wrong."
Nx = len(X)
Ns = len(c)
n_iprime = len(g_vector)
Z = np.zeros((Nx, Nx))
# There is a term that is repeated in all parts of the matrix, also when k = d
#
# Sum(bij*aik* ci/Xk)
# Such term will be the first to be calculated.
for j in range(0, Nx):
for k in range(0, Nx):
for i in range(0, Ns): #Sum(bij*aik* ci/Xk)
Z[j, k] = Z[j, k] + B[i,j]*A[i,k]*(c[i]/X[k])
if k == (Nx-1):
Z[j, k] = Z[j, k] + self.term_A4_Borkovec_asym(n_iprime, z_vector, X[k], c[:n_iprime])
return Z
def create_jacobian_Borkovec_1983_symm (self, A, B, c, X, I, z_vector, g_vector):
'''
Creating the Jacobian for the Newton-Raphson procedure. The algorithm is given in Borkovec (1983); you need to apply the info of the appendix plus the info of the paper.
Some parameters are slightly tricky, but everything seems ok, except the alpha parameter, which I do not trust.
This jacobian is treated as a symmetric electrolyte, namely equations (A.1 -A.4) of the appendix
dY/dX = dYj/dXk
'''
assert len(z_vector)==len(g_vector), " [create_jacobian_Borkovec_1983_symm] charge vector and g-factor vector lengths differ. Something must be wrong."
Nx = len(X)
Ns = len(c)
n_iprime = len(g_vector)
Z = np.zeros((Nx, Nx))
# There is a term that is repeated in all parts of the matrix, also when k = d
#
# Sum(bij*aik* ci/Xk)
# Such term will be the first to be calculated.
for j in range(0, Nx):
for k in range(0, Nx):
for i in range(0, Ns): #Sum(bij*aik* ci/Xk)
Z[j, k] = Z[j, k] + B[i,j]*A[i,k]*(c[i]/X[k])
if k != (Nx-1):
Z[j, k] = Z[j, k] + self.term_A2_and_A3_Borkovec(n_iprime, j, k, A, c, X,g_vector,z_vector, I)
elif k == (Nx-1): #There is one term for all K, except k = d and one for all
Z[j, k] = Z[j, k] + self.term_A4_Borkovec_sym(n_iprime,I, z_vector, X[k], c)
return Z
def term_A4_Borkovec_asym(self, n_iprime, z_vector, Xd, c):
dg_dXd_vec = self.dg_dXd_vec_eqn_11(z_vector, c, Xd)
b = sum(cb*z*dg for cb,z,dg in zip(c[:n_iprime],z_vector,dg_dXd_vec))
return b
def term_A2_and_A3_Borkovec(self, n_iprime, j, k, A, c, X, g_vector,z_vector, I):
v = 0
R = 0
for iprime in range(0, n_iprime):
v = v + ((z_vector[iprime]**2)/2)*A[iprime, k]*(c[iprime]/X[k])
for iprime in range(0, n_iprime):
R = R + c[iprime]*A[iprime, j]*(-g_vector[iprime]/(2*I))*v
return R
def term_A4_Borkovec_sym(self, n_iprime, I, z_vector, X_d, c):
R = 0
alpha = self.alpha_Borkovec_1983()
for iprime in range(0, n_iprime):
dgiprime_dXd = self.calculate_dg_dXd_Borkovec_1983_eqn_16 (I, alpha, X_d, z_vector[iprime])
R = R + c[iprime]*z_vector[iprime]*dgiprime_dXd
return R
def alpha_Borkovec_1983 (self):
'''
I THINK THERE IS A TYPO HERE (parameter alpha); I AM USING EQUATION 13 BUT I THINK THE EQUATION IS WRONG: SO I USE A MODIFIED ONE; I MUST ASK THE AUTHORS
'''
return np.sqrt((self.dielectric_constant*self.permittivity_free_space)/(2*self.universal_gas_constant*self.temperature))
def calculate_g_vec_Borkovec_1983_eqn_16 (self, I, X_d):
'''
It calculates the g factors of the paper of Borkovec (1983) using equation 16.
Precondition: the concentrations are ordered as follows: first the primary aqueous species, in the same order as the list of the class; then the secondary species, in the same order as they are stored in the class.
'''
g = []
alpha = self.alpha_Borkovec_1983()
for i in range(0, self.length_aq_pri_sp):
# if type(self.list_aq_pri_sp[i]) == Aq_Species:
z = self.list_aq_pri_sp[i].charge
g.append(self.calculate_g_Borkovec_1983_eqn_16 ( I, alpha, X_d, z))
for i in range(0, self.length_aq_sec_sp):
# if type(self.list_aq_sec_sp[i]) == Aq_Species:
z = self.list_aq_sec_sp[i].charge
g.append(self.calculate_g_Borkovec_1983_eqn_16 ( I, alpha, X_d, z))
return g
def calculate_g_Borkovec_1983_eqn_16 (self, I, alpha, X_d, z):
g = 2*alpha*(1/np.sqrt(I))*((X_d**(z/2))-1)*self.list_sorpt_pri_sp[0].sp_surf_area*(self.list_sorpt_pri_sp[0].solid_concentration_or_grams/self.Faraday_constant)
return g
def calculate_dg_dXd_Borkovec_1983_eqn_16 (self, I, alpha, X_d, z):
dg_dXd = 2*alpha*(1/np.sqrt(I))*(z/2)*(X_d**((z/2)-1))*self.list_sorpt_pri_sp[0].sp_surf_area*(self.list_sorpt_pri_sp[0].solid_concentration_or_grams/self.Faraday_constant)
return dg_dXd
def create_A_Borkovec (self):
if not hasattr(self, 'U'):
self.create_U ()
# HERE THE COLUMNS OF U are defined in the following way: Aqueous primary species (components) + Sorption primary species (components) + Electro components + Aqueous secondary species + Sorption Secondary species
# The rows are the components which are formulated in the order: Aqueous primary species (components) + Sorption primary species (components) + Electro components
#
# Two steps are necessary (Assuming that what is written about U is true):
# 1) U must be transpose
A_temp = self.U.transpose()
# Now A_temp is almost A: In the columns it has: Aqueous primary species (components) + Sorption primary species (components) + Electro components
# but the rows are in the following order: Aqueous primary species (components) + Sorption primary species (components) + Electro components + Aqueous secondary species + Sorption Secondary species
# The second step:
# 2) The order of the rows must be modified to be: Bulk part, basically aqueous part + Surface part, basically surface species
# Therefore it is decided to reorder in the following way: Bulk: [Aqueous primary species (components) + Aqueous secondary species] + Surface: [Sorption primary species (components) + Sorption secondary species]
# Furthermore, the row regarding the electrostatical potential that can be found in A_temp (A row made up of 0s) must be removed.
n_comp = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
ABulk = np.concatenate((A_temp[:self.length_aq_pri_sp, :], A_temp[n_comp : n_comp + self.length_aq_sec_sp, :]))
ASurface = np.concatenate ((A_temp[self.length_aq_pri_sp: self.length_aq_pri_sp + self.length_sorpt_pri_sp, :], A_temp[n_comp + self.length_aq_sec_sp :, :]))
self.A_Borkovec = np.concatenate((ABulk, ASurface))
self.A_Borkovec_columns = self.names_aq_pri_sp + self.names_sorpt_pri_sp + self.names_elec_sorpt
self.A_Borkovec_rows = self.names_aq_pri_sp + self.names_aq_sec_sp + self.names_sorpt_pri_sp + self.names_sorpt_sec_sp
def create_B_Borkovec (self, A, g):
'''
In Borkovec (1983), table 2 describes how the modified stoichiometric matrix B must be built using A as a model.
Precondition: A is ordered according to g; g is ordered with first the aqueous primary species followed by the secondary aqueous species.
'''
Nsb = self.length_aq_pri_sp + self.length_aq_sec_sp
Ncb = self.length_aq_pri_sp
B = A.copy()
# Part A
DG = np.diag(g) + np.identity(Nsb)
ADG = np.matmul(DG,A[:Nsb, : Ncb])
B [:Nsb, :Ncb] = ADG
# Part B
count=0
for i in range(0, self.length_aq_pri_sp):
z = self.list_aq_pri_sp[i].charge
B[i,-1] = z*g[count]
count += 1
for i in range(0, self.length_aq_sec_sp):
z = self.list_aq_sec_sp[i].charge
B[count,-1] = z*g[count]
count += 1
return B
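# Illustration only (toy numbers, np assumed): the bulk block of B above is the
# aqueous block of A pre-multiplied by (diag(g) + I); the last bulk column is
# then filled with z*g.
def _demo_bulk_block_of_B(A_bulk, g):
    DG = np.diag(g) + np.identity(len(g))
    return np.matmul(DG, A_bulk)
# e.g. _demo_bulk_block_of_B(np.array([[1.0, 0.0], [1.0, 1.0]]), np.array([0.1, 0.2]))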
def calculate_u_electro (self, unknown_boltzman_vect, C):
'''
T depends on the surface sorption type.
'''
T_sigma = []
pos_point_electro_unknown = 0
for i in range(0, self.length_sorpt_pri_sp):
if self.list_sorpt_pri_sp[i].type_sorption == 'CCM':
x = unknown_boltzman_vect[pos_point_electro_unknown]
psi = self.Boltzman_factor_2_psi(x)
charge_surface = self.list_sorpt_pri_sp[i].C1*psi
T = charge_surface*((self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant)
T_sigma.append(T)
pos_point_electro_unknown += 1
elif self.list_sorpt_pri_sp[i].type_sorption == 'TLM':
x = unknown_boltzman_vect[pos_point_electro_unknown : (pos_point_electro_unknown+3)]
psi = self.Boltzman_factor_2_psi(x)
charge_surface_0 = self.list_sorpt_pri_sp[i].C1*(psi[0]-psi[1])
charge_surface_b = self.list_sorpt_pri_sp[i].C1*(psi[1]-psi[0]) + self.list_sorpt_pri_sp[i].C2*(psi[1]-psi[2])
charge_surface_d = self.list_sorpt_pri_sp[i].C2*(psi[2]-psi[1])
#print(charge_surface_0 +charge_surface_b+charge_surface_d) Check that the sum of charges equals 0
#charge_surface_d = self.list_sorpt_pri_sp[i].C2*(psi[2]-psi[0])
D = (self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant
T_0 = charge_surface_0*D
T_b = charge_surface_b*D
T_d = charge_surface_d*D
# To T_d, the quantity Y_d from equation 14 of Westall is assigned
pos_C = self.length_aq_pri_sp+self.length_sorpt_pri_sp+self.length_names_elec_sorpt
C_aq = np.concatenate((C[:self.length_aq_pri_sp], C[pos_C : (pos_C + self.length_aq_sec_sp)]))
I = self.calculate_ionic_strength(C_aq)
B = np.sqrt(8*self.permittivity_free_space*self.dielectric_constant*self.universal_gas_constant*self.temperature*I)
E = np.sinh((self.Faraday_constant*psi[2])/(2*(self.universal_gas_constant*self.temperature)))
Y = B*E
#print(Y-T_d)
# print(Y+T_d)  # I have an existential doubt about this part.
#print(charge_surface_d+Y)
#
T_sigma.append(T_0); T_sigma.append(T_b);
T_sigma.append(Y+T_d)
#T_sigma.append( charge_surface_d+T_d)
pos_point_electro_unknown += 3
#print([T_sigma])
return np.array(T_sigma)
def Boltzman_factor_2_psi (self, x):
D = self.universal_gas_constant*self.temperature
psi = - np.log(x)*(D/self.Faraday_constant)
return psi
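# Illustration only: numerical feel for Boltzman_factor_2_psi. At 298.15 K,
# R*T/F ~ 0.0257 V, so a Boltzmann factor x = 0.1 maps to psi ~ +0.0592 V.
# The constants below are assumptions for this standalone demo only.
def _demo_boltzmann_to_psi(x, temperature=298.15):
    R = 8.314462618    # J/(mol K)
    F = 96485.33212    # C/mol
    return -np.log(x) * (R * temperature / F)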
def create_sorpt_vec (self):
T_sorpt = []
for i in range(0, self.length_sorpt_pri_sp):
T_sorpt.append(self.list_sorpt_pri_sp[i].T_solid)
return T_sorpt
def Jacobian_Speciation_Westall1980 (self, C, n_aq_plus_n_sorpt, n_primaryspecies):
'''
The jacobian matrix following an implementation based on the algorithm of Westall (1980)
"Chemical equilibrium Including Adsorption on Charged Surfaces"
Pages 37-to-39
It is assumed that C is order first with the primary species and then with the secondary species such as C = [C1 C2]
'''
# The first part treats all terms as it was a normal speciation
Z = np.zeros((n_primaryspecies, n_primaryspecies))
for i in range(0, n_primaryspecies):
for j in range(0, n_primaryspecies):
Z[i,j]= np.matmul(np.multiply(self.U[i,:], self.U[j,:]), (C/C[j]))
# According to point 2 of Table III of Westall, the term (C*s*a/F)*(R*T/(F*X_unknown)) must be added to the electrostatic part
# I am supposing here that all the sorption phases are CCM
for i in range(0, self.length_sorpt_pri_sp):
pos_unknown_vector = n_aq_plus_n_sorpt
# I am supposing here that the sorption phases are CCM
if self.list_sorpt_pri_sp[i].type_sorption == 'CCM':
D1 = self.universal_gas_constant*self.temperature
D2 = self.Faraday_constant*C[pos_unknown_vector]
F = ((self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant)
Z[pos_unknown_vector,pos_unknown_vector] = Z[pos_unknown_vector, pos_unknown_vector] + (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
pos_unknown_vector += 1
# I am supposing here that the sorption phases are TLM
elif self.list_sorpt_pri_sp[i].type_sorption == 'TLM':
D1 = self.universal_gas_constant*self.temperature
D2 = self.Faraday_constant*C[pos_unknown_vector]
D3 = self.Faraday_constant*C[pos_unknown_vector+1]
D4 = self.Faraday_constant*C[pos_unknown_vector+2]
F = ((self.list_sorpt_pri_sp[i].sp_surf_area*self.list_sorpt_pri_sp[i].solid_concentration_or_grams)/self.Faraday_constant)
# O-plane
# plane 0 - 0
Z[pos_unknown_vector,pos_unknown_vector] = Z[pos_unknown_vector, pos_unknown_vector] + (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
# plane 0 - b
Z[pos_unknown_vector,pos_unknown_vector+1] = Z[pos_unknown_vector, pos_unknown_vector+1] - (self.list_sorpt_pri_sp[i].C1*F)*(D1/D3)
# plane 0 - d
# plane b - 0
Z[pos_unknown_vector + 1,pos_unknown_vector] = Z[pos_unknown_vector + 1,pos_unknown_vector] - (self.list_sorpt_pri_sp[i].C1*F)*(D1/D2)
# plane b - b
Z[pos_unknown_vector + 1,pos_unknown_vector + 1] = Z[pos_unknown_vector + 1,pos_unknown_vector + 1] + ((self.list_sorpt_pri_sp[i].C1+self.list_sorpt_pri_sp[i].C2)*F)*(D1/D3)
# plane b - d
Z[pos_unknown_vector + 1,pos_unknown_vector + 2] = Z[pos_unknown_vector + 1,pos_unknown_vector + 2] - (self.list_sorpt_pri_sp[i].C2*F)*(D1/D4)
# plane d - 0
# plane d - b
Z[pos_unknown_vector + 2,pos_unknown_vector + 1] = Z[pos_unknown_vector + 2,pos_unknown_vector + 1] - (self.list_sorpt_pri_sp[i].C2*F)*(D1/D3)
# plane d - d
###### ---This part below is what is written in the paper of Westall
# A = -F/(2*R*T)
#param = self.Faraday_constant/(2*(self.universal_gas_constant*self.temperature))
#A = -param
#
#pos_C = self.length_aq_pri_sp+self.length_sorpt_pri_sp+self.length_names_elec_sorpt
#C_aq = np.concatenate((C[:self.length_aq_pri_sp], C[pos_C : (pos_C + self.length_aq_sec_sp)]))
#
#I = self.calculate_ionic_strength(C_aq)
#B = np.sqrt(8*self.permittivity_free_space*self.dielectric_constant*self.universal_gas_constant*self.temperature*I)
#psi_d = self.Boltzman_factor_2_psi(C[pos_unknown_vector+2])
#par_C = param*psi_d
#C = np.cosh(par_C)
#F_d = A*B*C
########## This part below is my own assumption, since I think that the equation given by the paper is wrong derivated.
pos_C = self.length_aq_pri_sp+self.length_sorpt_pri_sp+self.length_names_elec_sorpt
C_aq = np.concatenate((C[:self.length_aq_pri_sp], C[pos_C : (pos_C + self.length_aq_sec_sp)]))
I = self.calculate_ionic_strength(C_aq)
B = np.sqrt(8*self.permittivity_free_space*self.dielectric_constant*self.universal_gas_constant*self.temperature*I)
in_cosh = -np.log(C[pos_unknown_vector+2])/2
F_d = (B/2)*np.cosh(in_cosh)*(1/C[pos_unknown_vector+2])
Z[pos_unknown_vector + 2,pos_unknown_vector + 2] = F_d + (self.list_sorpt_pri_sp[i].C2*F)*(D1/D4)
pos_unknown_vector +=3
return Z
def calculate_ionic_strength (self,c):
'''
Calculate the ionic strength: the vector c is supposed to contain the concentrations of the aqueous primary species first, followed by the aqueous secondary species.
Both primary and secondary species are supposed to be ordered in the same way as in the class, namely self.
'''
if self.ionic_strength_constant:
return self.fix_ionic_strength
Ionic_s=0
count = 0
for i in range(0, self.length_aq_pri_sp):
# if type(self.list_aq_pri_sp[i]) == Aq_Species:
z = self.list_aq_pri_sp[i].charge
Ionic_s = Ionic_s + c[count]*z*z
count += 1
for i in range(0, self.length_aq_sec_sp):
# if type(self.list_aq_sec_sp[i]) == Aq_Species:
z = self.list_aq_sec_sp[i].charge
Ionic_s = Ionic_s + c[count]*z*z
count += 1
Ionic_s = 0.5*Ionic_s
return Ionic_s
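# Illustration only: the quantity computed above is I = 0.5 * sum_i c_i * z_i**2.
# For a 0.1 molal NaCl solution (z = +1 and -1) this gives I = 0.1.
def _demo_ionic_strength_nacl():
    c = np.array([0.1, 0.1])
    z = np.array([1.0, -1.0])
    return 0.5 * np.sum(c * z**2)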
def calculate_log_activity_coefficient_aq_pri_species (self, ionic_strength):
log_coef_a=np.zeros(self.length_aq_pri_sp)
for i in range(0, self.length_aq_pri_sp):
if self.list_aq_pri_sp[i].name == 'H2O':
# water has no activity coefficient (or it is 0). For water the activity is calculated directly with the Garrels and Christ (1965) formula
log_coef_a[i] = 0
else:
log_coef_a[i] = self.list_aq_pri_sp[i].log_coefficient_activity(ionic_strength, A=self.A_activitypar, B = self.B_activitypar )
return log_coef_a
def calculate_log_activity_coefficient_aq_sec_species (self, ionic_strength):
log_coef_a=np.zeros(self.length_aq_sec_sp)
for i in range(0, self.length_aq_sec_sp):
if self.list_aq_sec_sp[i].name == 'H2O':
# water has no activity coefficient (or it is 0). For water the activity is calculated directly with the Garrels and Christ (1965) formula
log_coef_a[i] = 0
else:
log_coef_a[i] = self.list_aq_sec_sp[i].log_coefficient_activity(ionic_strength, A=self.A_activitypar, B = self.B_activitypar )
return log_coef_a
def Bethke_algorithm (self, tolerance = 1e-6, max_n_iterations = 100, tolerance_psi = 1e-6, max_n_iterations_psi = 800, tolerance_big_loop = 1e-6, max_n_iterations_big_loop = 100):
'''
This algorithm implementation is based on Geochemical and Biogeochemical Reaction Modeling by <NAME>,
section 10.3
'''
# Check that water is in the database as a primary species and is in the first position
# So far, for simplicity, I leave the H2O handling like that, but it can be changed.
ind = self.names_aq_pri_sp.index('H2O')
if not (ind == 0):
raise ValueError('[ChemSys/bethke_algorithm] --> To use this algorithm water must be in the first position of the primary species. \n')
'''
Separates primary and Secondary species matrices.
e.g.:
H2O sp_i sp_p sp_elec sp_j sp_q
R1 || x11 x1ni x1np x1nelec x1nj x1nq || || x11 x12 || || x11 ||
S = R2 || x21 x2ni x2np x2nelec x2nj x2nq || in to S1 = || x21 x22 || and S2= || x21 ||
R3 || x31 x3ni x3np x3nelec x3nj x3nq || || x31 x32 || || x32 ||
where rows R are reactions, and columns are H2O (water), sp_i (aqueous primary species), sp_p (sorption primary species - "uncomplexed" so-labelled by Bethke),
sp_elec (Electrostatic part, Boltzman factor of the sorption charge), sp_j (aqueous secondary species), sp_q (sorption secondary species)
These part can be separated in to S1 and S2:
|| x11 x1ni x1np x1nelec || || x1nj x1nq ||
S1 = || x21 x2ni x2np x2nelec || and S2= || x2nj x2nq ||
|| x31 x3ni x3np x3nelec || || x3nj x3nq ||
# Or in S1, S2, S3 different ---> These S, separation will be more clear once the algorithm is done.
The U is defined:
H2O sp_i sp_p sp_elec sp_j sp_q
∑ H2O || 1 0 0 0 v_wj v_wq ||
U = V = ∑ sp_i || 0 I 0 0 v_ij v_iq ||
∑ sp_p || 0 0 I 0 0 v_pq ||
∑ sp_elec || 0 0 0 0 0 z_q ||
I call U = V because of the nomenclature used by Bethke.
As before, the algorithm can be divided in different U parts. For instance the last row, using the algorithm provided by Bethke, must be decoupled from the matrix.
'''
# Instantiation of first guesses
nw = 1 # So we are supposing that the initial amount of water is 1; actually it must change
mi = (0.9*np.array(self.aq_u_vector[1:]))*nw
Mp= self.create_sorpt_vec()
mp = (0.9*np.array(Mp))
Boltzfactor = np.ones(self.length_names_elec_sorpt) # Boltzfactor ==> exp(-psi*F/RT) Later I will need psi but not yet. Now I only need the boltzman factor for mj and mp guesses
S1, S2 = self.separte_S_into_S1_and_S2()
S_prima = -np.matmul(linalg.inv(S2),S1)
log_K_prima = np.matmul(linalg.inv(S2), self.log_k_vector)
ionic_strength = 0 # self.calculate_ionic_strength (c_aqueouspecies)
log_a_water = np.log10(1-(0.018*np.sum(mi))) # calculating the log activity of water (water has not coefficient)
log_a_coeff_aq_pri_sp = self.calculate_log_activity_coefficient_aq_pri_species (ionic_strength)
log_a_coeff_aq_sec_sp = self.calculate_log_activity_coefficient_aq_sec_species (ionic_strength)
mj_and_mq = self.log_speciation_secondaryspecies_Bethke (log_a_water, log_a_coeff_aq_pri_sp, log_a_coeff_aq_sec_sp, mi, mp, Boltzfactor,S_prima, log_K_prima)
mj_and_mq = 10**mj_and_mq
# separation
mj = mj_and_mq[:self.length_aq_sec_sp]
mq = mj_and_mq[self.length_aq_sec_sp:]
## Other parameters that must be calculated and are constant during the loops
# length values
length_aq_sorpt_pri = self.length_aq_pri_sp + self.length_sorpt_pri_sp
length_pri = self.length_aq_pri_sp + self.length_sorpt_pri_sp + self.length_names_elec_sorpt
# matrix that are keep constant through the loops
U2 = self.U[:, length_pri:]
M = np.concatenate((self.aq_u_vector, Mp)) # The given component value for aqueous and surface species
WV_and_WP= np.multiply(U2[0,:], U2[1:length_aq_sorpt_pri,:]) # The matrix WV and WP contain the terms v_wj*v_ij, v_wq*v_iq and the terms v_wq*v_pq
I = np.identity(length_aq_sorpt_pri-1) # The delta of equation (10.33) of <NAME>'s book
Area_v = self.calculate_A_sf_Bethke()
charge_background_solute = 1
c_minus1 = np.zeros(length_pri + self.length_aq_sec_sp + self.length_sorpt_sec_sp)
## I have 2 loops: 1) a Newton-Raphson method that must be solved; once solved, the values are used to calculate a new value of the surface potential.
# So these 2 loops are contained in an outer loop
err_big_loop = 1
counter_iterations_big_loop = 0
while err_big_loop> tolerance_big_loop and counter_iterations_big_loop < max_n_iterations_big_loop:
# Ini error parameter
err = 1
counter_iterations = 0
# First loop, Newton-Raphson
while err>tolerance and counter_iterations < max_n_iterations:
#### Residual vector ####
# water #####
Jww = 55.5 + np.dot(U2[0,:],mj_and_mq)
rw = nw*Jww
# aqueous primary species ####
Jiw = mi + np.matmul(U2[1:self.length_aq_pri_sp,:], mj_and_mq)
ri = nw*Jiw
# sorption primary species ####
Jpw = mp + np.matmul(U2[self.length_aq_pri_sp:length_aq_sorpt_pri,:], mj_and_mq) # Actually it should be only ∑_q v_pq*mq but the terms of v_pj are 0 (at least theoretically, I hope). So the equation is ok.
rp = nw*Jpw
# assamble
r = np.concatenate(([rw], ri, rp))
# R functions evaluated
R = r - M
# print(R)  # debug output
####### Jacobian matrix #########
# parameters Jww, Jiw, and Jpw already calculated
# Jwp and Jwq are calculated together due to the U matrix is defined by using WV_and_WP*mj_and_mq
jwp_and_jwq = np.matmul(WV_and_WP,mj_and_mq)
mi_and_mp = np.concatenate((mi,mp))
Jwp_and_Jwq = np.multiply((nw/mi_and_mp), jwp_and_jwq)
# If my intuition does not fool me, it should be possible to calculate the part of the Jacobian matrix [equation (10.34) of Craig's book] that comprises the terms Jii', Jip, Jpi, and Jpp'
# in the same way as Jii when having only speciation (section 4 of the book), or in the same way that Jwp and Jwq could be calculated together.
Jii_Jip_and_Jpi_Jpp = nw*I + nw*self.Js_partB_calculation(mi_and_mp, mj_and_mq, U2[1:length_aq_sorpt_pri,:])
# Assembling
Jw = np.concatenate(([Jww],Jiw,Jpw))
Jip = np.vstack((Jwp_and_Jwq, Jii_Jip_and_Jpi_Jpp))
J = np.c_[Jw,Jip]
# Solution Newton-Raphson
delta_c = linalg.solve(J,-R)
err = max(abs(delta_c))
#print(err)
# relaxation factor
max_1 = 1
max_2 =(-2*delta_c[0])/nw
max_3 = np.amax(-2*np.multiply(delta_c[1:self.length_aq_pri_sp], 1/mi))
max_4 = np.amax(-2*np.multiply(delta_c[self.length_aq_pri_sp:], 1/mp))
Max_f = np.amax([max_1, max_2, max_3, max_4])
Del_mul = 1/Max_f
# Update guesses
nw = nw + Del_mul*delta_c[0]
mi = mi + Del_mul*delta_c[1:self.length_aq_pri_sp]
mp = mp + Del_mul*delta_c[self.length_aq_pri_sp:]
# Update secondaries
ionic_strength = self.calculate_ionic_strength (np.concatenate(([55.5], mi, mj)))
log_a_water = np.log10(1-(0.018*(np.sum(mi)+np.sum(mj)))) # calculating the log activity of water (water has not coefficient)
log_a_coeff_aq_pri_sp = self.calculate_log_activity_coefficient_aq_pri_species (ionic_strength)
log_a_coeff_aq_sec_sp = self.calculate_log_activity_coefficient_aq_sec_species (ionic_strength)
mj_and_mq = self.log_speciation_secondaryspecies_Bethke (log_a_water, log_a_coeff_aq_pri_sp, log_a_coeff_aq_sec_sp, mi, mp, Boltzfactor,S_prima, log_K_prima)
mj_and_mq = 10**mj_and_mq
mj = mj_and_mq[:self.length_aq_sec_sp]
mq = mj_and_mq[self.length_aq_sec_sp:]
counter_iterations += 1
if counter_iterations >= max_n_iterations:
raise ValueError('Max number of iterations in chemistry part surpassed.')
# First loop terminated. Chemistry values establish
# Second loop, loop of values of the psi must be started
#### SECOND ITERATION LOOP #####
# Newton approach for the psi potential
# Parameter before loop
a = np.matmul(U2[length_aq_sorpt_pri, self.length_aq_sec_sp:], mq)
##############################################################################
# CODE OF MARIA #
##############################################################################
import gym
import numpy as np
import sys
import os
import time
import pandas
import random
import pickle
import itertools
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.model_selection import ParameterGrid
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
def feature_function(env, state, action):
row, col = state // 4, state % 4
row, col = int(row / 7), int(col / 7)
state_prox = env.P[state][action][0][1]
row_prox, col_prox = state_prox // 4, state_prox % 4
row_prox, col_prox = int(row_prox / 7), int(col_prox / 7)
#features = np.array([1, row, col, row**2, col**2], dtype='float64')
features = np.zeros(64)
features[state] = 1
action_features = np.zeros(64, dtype=np.float64)
action_features[state_prox] = 1
#if state == 63:
# action_features[state_prox] = 0
features = np.concatenate([features, action_features])
return features
def linear_regression(x, w):
return np.dot(w, x)
def choose_action(env, s, actions, w, epsilon):
action_values = np.zeros(len(actions), dtype=np.float64)
for action in actions:
x = feature_function(env, s, action)
action_values[action] = linear_regression(x, w)
if np.random.rand() < epsilon:
selected = np.random.choice(len(actions))
else:
selected = np.random.choice(np.argwhere(
action_values == np.max(action_values)).ravel())
return selected
def sarsa_lambda_approx(env, episodes=1000, discount=0.9, alpha=0.01, trace_decay=0.9,
epsilon=0.1):
number_actions = env.nA
actions = np.arange(number_actions)
x = feature_function(env, 0, 0)
n_features = len(x)
w = np.zeros(n_features) + 0.0001
stats = np.zeros(episodes)
for episode in range(episodes):
aux = 0
state = env.reset() # Always state=0
action = choose_action(env, state, actions, w, epsilon)
x = feature_function(env, state, action)
z = np.zeros(n_features)
q_prev = 0
for t in itertools.count():
aux += 1
state_next, reward, done, _ = env.step(action)
action_next = choose_action(env, state_next, actions, w, epsilon)
x_next = feature_function(env, state_next, action_next)
q = linear_regression(x, w)
q_next = linear_regression(x_next, w)
if done and reward == 0:
reward = -1
delta = reward + discount * q_next - q
z = discount * trace_decay * z + \
(1 - alpha * discount * trace_decay * np.dot(z, x)) * x
w = w + alpha * (delta + q - q_prev) * z - alpha * (q - q_prev) * x
# w = w + alpha * delta * x
q_prev = q_next
x = x_next
action = action_next
stats[episode] += reward
# env.render()
if done:
if reward == 1:
reward = 1
# print("episode, aux", episode, aux, reward)
# else:
# print('Episode ended: agent fell in the lake')
break
return w, stats
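# Illustrative usage sketch only; the environment id and API below are
# assumptions (a classic-gym FrozenLake8x8, whose 64 states match
# feature_function, with the old 4-tuple step interface used above).
# env = gym.make('FrozenLake8x8-v0')
# w, stats = sarsa_lambda_approx(env, episodes=2000, alpha=0.01, epsilon=0.1)
# plt.plot(np.cumsum(stats)); plt.xlabel('episode'); plt.ylabel('cumulative reward'); plt.show()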
def generate_stats_sarsa_approx(env, w_, episodes=100, discount=0.99, alpha=0.01, trace_decay=0.9,
epsilon=0.01, display=False):
number_actions = env.nA
actions = np.arange(number_actions)
x = feature_function(env, 0, 0)
n_features = len(x)
w = w_
win_ = 0
for episode in range(episodes):
aux = 0
state = env.reset() # Always state=0
action = choose_action(env, state, actions, w, epsilon)
x = feature_function(env, state, action)
z = np.zeros(n_features)
"""
Test cases for the regi0.geographic.utils.is_outlier function.
"""
import numpy as np
import pytest
from regi0.geographic.utils import is_outlier
@pytest.fixture()
def values():
return np.array([52, 56, 53, 57, 51, 59, 1, 99])
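# Illustration only, not regi0's actual API: the fixture mixes clustered values
# (51-59) with two extremes (1 and 99). A plain IQR rule, as a stand-in for
# whatever method is_outlier implements, flags exactly those two values:
def _demo_iqr_outliers(values):
    q1, q3 = np.percentile(values, [25, 75])
    iqr = q3 - q1
    return (values < q1 - 1.5 * iqr) | (values > q3 + 1.5 * iqr)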
"""
Copyright (c) 2010-2018 CNRS / Centre de Recherche Astrophysique de Lyon
Copyright (c) 2012-2017 <NAME> <<EMAIL>>
Copyright (c) 2014-2019 <NAME> <<EMAIL>>
Copyright (c) 2016 <NAME> <<EMAIL>>
Copyright (c) 2016-2019 <NAME> <<EMAIL>>
Copyright (c) 2018-2019 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import types
import astropy.units as u
from astropy.io import fits
from astropy.stats import gaussian_sigma_to_fwhm, gaussian_fwhm_to_sigma
from astropy.convolution import convolve, Box1DKernel
from os.path import join, abspath, dirname
from scipy import interpolate, signal
from scipy.optimize import leastsq
from . import ABmag_filters, wavelet1D
from .arithmetic import ArithmeticMixin
from .data import DataArray
from .fitting import Gauss1D
from .objs import flux2mag
__all__ = ('Spectrum', 'vactoair', 'airtovac')
def vactoair(vacwl):
"""Calculate the approximate wavelength in air for vacuum wavelengths.
Parameters
----------
vacwl : ndarray
Vacuum wavelengths.
This uses an approximate formula from the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/vactoair.pro
"""
wave2 = vacwl * vacwl
n = 1.0 + 2.735182e-4 + 131.4182 / wave2 + 2.76249e8 / (wave2 * wave2)
# Do not extrapolate to very short wavelengths.
if not isinstance(vacwl, np.ndarray):
if vacwl < 2000:
n = 1.0
else:
ignore = np.where(vacwl < 2000)
n[ignore] = 1.0
return vacwl / n
def airtovac(airwl):
"""Convert air wavelengths to vacuum wavelengths.
Parameters
----------
airwl : ndarray
Air wavelengths.
This uses the IAU standard as implemented in the IDL astronomy library
https://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro
"""
sigma2 = (1e4 / airwl)**2. # Convert to wavenumber squared
n = 1.0 + (6.4328e-5 + 2.94981e-2 / (146. - sigma2) +
2.5540e-4 / (41. - sigma2))
if not isinstance(airwl, np.ndarray):
if airwl < 2000:
n = 1.0
else:
ignore = np.where(airwl < 2000)
n[ignore] = 1.0
return airwl * n
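# Illustration only: rough sanity check that the two conversions above are
# near-inverses for optical wavelengths. They come from two different
# refraction formulas, so the round trip is only approximate (typically well
# below a milli-angstrom in this range).
def _demo_air_vac_roundtrip():
    lam = np.linspace(4000.0, 9000.0, 6)   # angstroms
    return np.max(np.abs(vactoair(airtovac(lam)) - lam))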
class Spectrum(ArithmeticMixin, DataArray):
"""Spectrum objects contain 1D arrays of numbers, optionally
accompanied by corresponding variances. These numbers represent
sample fluxes along a regularly spaced grid of wavelengths.
The spectral pixel values and their variances, if any, are
available as arrays that can be accessed via properties of the
Spectrum object called .data and .var, respectively. These arrays
are usually masked arrays, which share a boolean masking array
that can be accessed via a property called .mask. In principle,
these arrays can also be normal numpy arrays without masks, in
which case the .mask property holds the value,
numpy.ma.nomask. However non-masked arrays are only supported by a
subset of mpdaf functions at this time, so masked arrays should be
used where possible.
When a new Spectrum object is created, the data, variance and mask
arrays can either be specified as arguments, or the name of a FITS
file can be provided to load them from.
Parameters
----------
filename : string
An optional FITS file name from which to load the spectrum.
None by default. This argument is ignored if the data
argument is not None.
ext : int or (int,int) or string or (string,string)
The optional number/name of the data extension
or the numbers/names of the data and variance extensions.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
unit : str or `astropy.units.Unit`
The physical units of the data values. Defaults to
`astropy.units.dimensionless_unscaled`.
data : float array
An optional 1 dimensional array containing the values of each
pixel of the spectrum, stored in ascending order of wavelength
(None by default). Where given, this array should be 1
dimensional.
var : float array
An optional 1 dimensional array containing the estimated
variances of each pixel of the spectrum, stored in ascending
order of wavelength (None by default).
Attributes
----------
filename : string
The name of the originating FITS file, if any. Otherwise None.
unit : `astropy.units.Unit`
The physical units of the data values.
primary_header : `astropy.io.fits.Header`
The FITS primary header instance, if a FITS file was provided.
data_header : `astropy.io.fits.Header`
The FITS header of the DATA extension.
wave : `mpdaf.obj.WaveCoord`
The wavelength coordinates of the spectrum.
"""
# Tell the DataArray base-class that Spectrum objects require 1 dimensional
# data arrays and wavelength coordinates.
_ndim_required = 1
_has_wave = True
def subspec(self, lmin, lmax=None, unit=u.angstrom):
"""Return the flux at a given wavelength, or the sub-spectrum
of a specified wavelength range.
A single flux value is returned if the lmax argument is None
(the default), or if the wavelengths assigned to the lmin and
lmax arguments are both within the same pixel. The value that
is returned is the value of the pixel whose wavelength is
closest to the wavelength specified by the lmin argument.
Note that if a wavelength range is asked for, a view on the original
spectrum is returned and both will be modified at the same time. If
you need to modify only the sub-spectrum, you'll need to copy() it
before.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : float or `~mpdaf.obj.Spectrum`
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
if lmax is None:
lmax = lmin
# Are lmin and lmax array indexes?
if unit is None:
pix_min = max(0, int(lmin + 0.5))
pix_max = min(self.shape[0], int(lmax + 0.5))
# Convert wavelengths to the nearest spectrum array indexes.
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True, unit=unit))
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True, unit=unit) + 1)
# If the start and end of the wavelength range select the same pixel,
# return just the value of that pixel.
if (pix_min + 1) == pix_max:
return self[pix_min]
# Otherwise return a sub-spectrum.
else:
return self[pix_min:pix_max]
def get_step(self, unit=None):
"""Return the wavelength step size.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned step-size.
Returns
-------
out : float
The width of a spectrum pixel.
"""
if self.wave is not None:
return self.wave.get_step(unit)
def get_start(self, unit=None):
"""Return the wavelength value of the first pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the first pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_start(unit)
def get_end(self, unit=None):
"""Return the wavelength of the last pixel of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelength.
Returns
-------
out : float
The wavelength of the final pixel of the spectrum.
"""
if self.wave is not None:
return self.wave.get_end(unit)
def get_range(self, unit=None):
"""Return the wavelength range (Lambda_min, Lambda_max) of the spectrum.
Parameters
----------
unit : `astropy.units.Unit`
The units of the returned wavelengths.
Returns
-------
out : float array
The minimum and maximum wavelengths.
"""
if self.wave is not None:
return self.wave.get_range(unit)
def mask_region(self, lmin=None, lmax=None, inside=True, unit=u.angstrom):
"""Mask spectrum pixels inside or outside a wavelength range, [lmin,lmax].
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
inside : bool
If True, pixels inside the range [lmin,lmax] are masked.
If False, pixels outside the range [lmin,lmax] are masked.
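        Examples
        --------
        A minimal sketch, assuming ``spe`` is a Spectrum whose wavelength
        range covers 6800-7200 Angstroms::
            spe.mask_region(6800.0, 7200.0)                # mask inside
            spe.mask_region(6800.0, 7200.0, inside=False)  # mask outside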
"""
if self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
else:
if lmin is None:
pix_min = 0
else:
if unit is None:
pix_min = max(0, int(lmin + 0.5))
else:
pix_min = max(0, self.wave.pixel(lmin, nearest=True,
unit=unit))
if lmax is None:
pix_max = self.shape[0]
else:
if unit is None:
pix_max = min(self.shape[0], int(lmax + 0.5))
else:
pix_max = min(self.shape[0],
self.wave.pixel(lmax, nearest=True,
unit=unit) + 1)
if inside:
self.data[pix_min:pix_max] = np.ma.masked
else:
self.data[:pix_min] = np.ma.masked
self.data[pix_max + 1:] = np.ma.masked
def _wavelengths_to_slice(self, lmin, lmax, unit):
"""Return the slice that selects a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of a wavelength range, or the wavelength
of a single pixel if lmax is None.
lmax : float or None
The maximum wavelength of the wavelength range.
unit : `astropy.units.Unit`
The wavelength units of the lmin and lmax arguments. The
default is angstroms. If unit is None, then lmin and lmax
are interpreted as array indexes within the spectrum.
Returns
-------
out : slice
The slice needed to select pixels within the specified wavelength
range.
"""
if unit is not None and self.wave is None:
raise ValueError('Operation forbidden without world coordinates '
'along the spectral direction')
# Get the pixel index that corresponds to the minimum wavelength.
if lmin is None:
i1 = 0
else:
if unit is None:
if lmin > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = max(0, int(lmin + 0.5))
else:
i1 = self.wave.pixel(lmin, nearest=False, unit=unit)
if i1 > self.shape[0]:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i1 = self.wave.pixel(lmin, nearest=True, unit=unit)
# Get the pixel index that corresponds to the maximum wavelength.
if lmax is None:
i2 = self.shape[0]
else:
if unit is None:
if lmax < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = min(self.shape[0], int(lmax + 0.5))
else:
i2 = self.wave.pixel(lmax, nearest=False, unit=unit)
if i2 < 0:
raise ValueError('Minimum and maximum wavelengths '
'are outside the spectrum range')
i2 = self.wave.pixel(lmax, nearest=True, unit=unit) + 1
return slice(i1, i2)
def _interp(self, wavelengths, spline=False):
"""return the interpolated values corresponding to the wavelength
array.
Parameters
----------
wavelengths : array of float
wavelength values
unit : `astropy.units.Unit`
Type of the wavelength coordinates
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
lbda = self.wave.coord()
data = np.pad(self.data.compressed(), 1, 'edge')
w = np.concatenate(([self.get_start() - 0.5 * self.get_step()],
np.compress(~self._mask, lbda),
[self.get_end() + 0.5 * self.get_step()]))
if spline:
if self._var is not None:
_weight = 1. / np.sqrt(np.abs(self.var.filled(np.inf)))
if self.mask is np.ma.nomask:
                    weight = np.empty(self.shape[0] + 2, dtype=float)
weight[1:-1] = _weight
else:
ksel = np.where(self.mask == False)
weight = np.empty(np.shape(ksel)[1] + 2)
weight[1:-1] = _weight[ksel]
weight[0] = weight[1]
weight[-1] = weight[-2]
else:
weight = None
tck = interpolate.splrep(w, data, w=weight)
return interpolate.splev(wavelengths, tck, der=0)
else:
f = interpolate.interp1d(w, data)
return f(wavelengths)
def _interp_data(self, spline=False):
"""Return data array with interpolated values for masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
if np.count_nonzero(self._mask) in (0, self.shape[0]):
return self._data
lbda = self.wave.coord()
wnew = lbda[self._mask]
data = self._data.copy()
data[self._mask] = self._interp(wnew, spline)
return data
def interp_mask(self, spline=False):
"""Interpolate masked pixels.
Parameters
----------
spline : bool
False: linear interpolation (use `scipy.interpolate.interp1d`),
True: spline interpolation (use `scipy.interpolate.splrep`
and `scipy.interpolate.splev`).
"""
self.data = np.ma.masked_invalid(self._interp_data(spline))
def rebin(self, factor, margin='center', inplace=False):
"""Combine neighboring pixels to reduce the size of a spectrum by an
integer factor.
Each output pixel is the mean of n pixels, where n is the
specified reduction factor.
Parameters
----------
factor : int
The integer reduction factor by which the spectrum should
be shrunk.
margin : string in 'center'|'right'|'left'|'origin'
When the dimension of the input spectrum is not an integer
multiple of the reduction factor, the spectrum is
truncated to remove just enough pixels that its length is
a multiple of the reduction factor. This sub-spectrum is
then rebinned in place of the original spectrum. The
margin parameter determines which pixels of the input
spectrum are truncated, and which remain.
The options are:
            'origin' or 'left':
                The start of the output spectrum is coincident
                with the start of the input spectrum.
'center':
The center of the output spectrum is aligned
with the center of the input spectrum, within
one pixel.
'right':
The end of the output spectrum is coincident
with the end of the input spectrum.
inplace : bool
If False, return a rebinned copy of the spectrum (the default).
If True, rebin the original spectrum in-place, and return that.
Returns
-------
out : Spectrum
"""
# Delegate the rebinning to the generic DataArray function.
return self._rebin(factor, margin, inplace)
def _decimation_filter(self, newstep, atten, unit=None):
"""This is a private function Spectrum.resample(), used to apply
a decimation filter prior to resampling.
Parameters
----------
        newstep : float
The new pixel size along the wavelength axis of the spectrum.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. A good value
to choose is 40dB, which produces a response that is very
similar to a blackman filter applied within the Fourier
plane, but with less ringing in the image plane.
unit : `astropy.units.Unit`
The wavelength units of the step argument. A value of None
is equivalent to specifying self.wave.unit.
"""
# Convert the attenuation from dB to a linear scale factor.
gcut = 10.0**(-atten / 20.0)
# Calculate the Nyquist folding frequency of the new pixel size.
nyquist_folding_freq = 0.5 / newstep
# Calculate the standard deviation of a Gaussian whose Fourier
# transform drops from unity at the center to gcut at the Nyquist
# folding frequency.
sigma = (0.5 / np.pi / nyquist_folding_freq *
np.sqrt(-2.0 * np.log(gcut)))
# Convert the standard deviation from wavelength units to input pixels.
sigma /= self.get_step(unit=unit)
# Choose dimensions for the gaussian filtering kernel. Choose an
# extent from -4*sigma to +4*sigma. This truncates the gaussian
# where it drops to about 3e-4 of its peak. The following
# calculation ensures that the dimensions of the array are odd, so
# that the gaussian will be symmetrically sampled either side of a
# central pixel. This prevents spectral shifts.
gshape = int(np.ceil(4.0 * sigma)) * 2 + 1
# fftconvolve requires that the kernel be no larger than the array
# that it is convolving, so reduce the size of the kernel array if
# needed. Be careful to choose an odd sized array.
n = self.shape[0]
if gshape > n:
gshape = n if n % 2 != 0 else (n - 1)
# Sample the gaussian filter symmetrically around the central pixel.
gx = np.arange(gshape, dtype=float) - gshape // 2
gy = np.exp(-0.5 * (gx / sigma)**2)
# Area-normalize the gaussian profile.
gy /= gy.sum()
# Filter the spectrum with the gaussian filter.
self.fftconvolve(gy, inplace=True)
def resample(self, step, start=None, shape=None, unit=u.angstrom,
inplace=False, atten=40.0, cutoff=0.25):
"""Resample a spectrum to have a different wavelength interval.
Parameters
----------
step : float
The new pixel size along the wavelength axis of the spectrum.
start : float
The wavelength at the center of the first pixel of the resampled
spectrum. If None (the default) the center of the first pixel
has the same wavelength before and after resampling.
unit : `astropy.units.Unit`
The wavelength units of the step and start arguments.
The default is u.angstrom.
shape : int
The dimension of the array of the new spectrum (ie. the number
of spectral pixels). If this is not specified, the shape is
selected to encompass the wavelength range from the chosen
start wavelength to the ending wavelength of the input spectrum.
inplace : bool
If False, return a resampled copy of the spectrum (the default).
If True, resample the original spectrum in-place, and return that.
atten : float
The minimum attenuation (dB), of the antialiasing
decimation filter at the Nyquist folding frequency of the
new pixel size. Larger attenuations suppress aliasing
better at the expense of worsened resolution. The default
attenuation is 40.0 dB. To disable antialiasing, specify
atten=0.0.
cutoff : float
Mask each output pixel of which at least this fraction of the
pixel was interpolated from masked input pixels.
Returns
-------
out : Spectrum
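        Examples
        --------
        A minimal sketch, resampling ``spe`` onto pixels twice as wide
        (``spe`` is an illustrative Spectrum instance)::
            step = spe.get_step(unit=u.angstrom)
            res = spe.resample(2.0 * step, unit=u.angstrom)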
"""
out = self if inplace else self.copy()
# Don't allow the spectrum to be started beyond the far end of
# the spectrum, because this would result in an empty spectrum.
if start is not None and start > self.get_end(unit):
raise ValueError('The start value is past the end of the '
'spectrum range')
# Get wavelength world coordinates of the output spectrum.
newwave = self.wave.resample(step, start, unit)
# How many pixels should there be in the resampled spectrum?
# If the user didn't specify this, use newwave.shape, which
# holds the number of pixels of size 'step' needed to sample
# from 'start' to the end of the current wavelength range.
if shape is not None:
newwave.shape = shape
# Get the existing wavelength step size in the new units.
oldstep = self.wave.get_step(unit)
# If the spectrum is being resampled to a larger pixel size,
# then a decimation filter should be applied before
# resampling, to ensure that the new pixel size doesn't
# undersample rapidly changing features in the spectrum.
if step > oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
# Get the data, mask (and variance) arrays, and replace bad pixels with
# zeros.
if out._mask is not None: # Is out.data a masked array?
data = out.data.filled(0.0)
if out._var is not None:
var = out.var.filled(0.0)
else:
var = None
mask = out._mask
else: # Is out.data just a numpy array?
mask = ~np.isfinite(out._data)
data = out._data.copy()
data[mask] = 0.0
if out.var is not None:
var = out.var.copy()
var[mask] = 0.0
else:
var = None
# Get the coordinates of the pixels of the input and output spectra.
xi = self.wave.coord()
xo = newwave.coord()
# Get a resampled versions of the data array, optionally the variance
# array, and a floating point version of the mask array. Note that the
# choice of linear interpolation is required to preserve flux.
data = interpolate.griddata(xi, data, xo, method="linear",
fill_value=np.nan)
if var is not None:
var = interpolate.griddata(xi, var, xo, method="linear",
fill_value=np.nan)
mask = interpolate.griddata(xi, mask.astype(float), xo,
method="linear", fill_value=1.0)
# Create a new boolean mask in which all pixels that had an integrated
# contribution of more than 'cutoff' originally masked pixels are
# masked. Note that setting the cutoff to the "obvious" value of zero
# results in lots of pixels being masked that are far away from any
# masked pixels, due to precision errors in the griddata()
# function. Limit the minimum value of the cutoff to avoid this.
mask = np.greater(mask, max(cutoff, 1.0e-6))
# If masked arrays were not in use in the original spectrum, fill
# bad pixels with NaNs.
if out._mask is None:
data[mask] = np.nan
if var is not None:
var[mask] = np.nan
mask = None
# Install the resampled arrays.
out._data = data
out._var = var
out._mask = mask
# Install the new wavelength world coordinates.
out.wave = newwave
# When up-sampling, decimation filter the output spectrum. The
# combination of this and the linear interpolation of the preceding
# griddata() produces a much better interpolation than a cubic spline
# filter can. In particular, a spline interpolation does not conserve
# flux, whereas linear interpolation plus decimation filtering does.
if step < oldstep and atten > 0.0:
out._decimation_filter(step, atten, unit=unit)
return out
def mean(self, lmin=None, lmax=None, weight=True, unit=u.angstrom):
"""Compute the mean flux over a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
weight : bool
If weight is True, compute the weighted mean, inversely
weighting each pixel by its variance.
Returns
-------
out : (float, float)
The mean flux and its error.
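        Examples
        --------
        A minimal sketch, assuming ``spe`` covers the requested range::
            flux, err = spe.mean(6500.0, 7500.0)   # variance-weighted mean
            flux, err = spe.mean(6500.0, 7500.0, weight=False)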
"""
# Don't attempt to perform a weighted mean if there are no variances.
if self._var is None:
weight = False
# Get the slice that selects the specified wavelength range.
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
except ValueError:
return (0.0, np.inf)
# Obtain the mean flux of the sub-spectrum.
if weight:
weights = 1.0 / self.var[lambda_slice].filled(np.inf)
flux, wsum = np.ma.average(self.data[lambda_slice],
weights=weights, returned=True)
if self.var is not None:
err_flux = np.sqrt(
np.ma.sum(self.var[lambda_slice] * weights**2) / wsum**2)
else:
err_flux = np.inf
else:
flux, wsum = np.ma.average(self.data[lambda_slice], returned=True)
if self.var is not None:
                err_flux = np.sqrt(np.ma.sum(self.var[lambda_slice]) / wsum**2)
else:
err_flux = np.inf
return (flux, err_flux)
def sum(self, lmin=None, lmax=None, weight=True, unit=u.angstrom):
"""Obtain the sum of the fluxes within a specified wavelength range.
Parameters
----------
lmin : float
The minimum wavelength of the range, or None to choose the
wavelength of the first pixel in the spectrum.
lmax : float
The maximum wavelength of the range, or None to choose the
wavelength of the last pixel in the spectrum.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax. If None, lmin and
lmax are assumed to be pixel indexes.
weight : bool
If weight is True, compute the weighted sum, inversely
weighting each pixel by its variance.
Returns
-------
out : float, float
The total flux and its error.
"""
# Get the slice that selects the specified wavelength range.
try:
lambda_slice = self._wavelengths_to_slice(lmin, lmax, unit)
except ValueError:
return (0.0, np.inf)
# Perform a weighted sum?
if weight and self._var is not None:
weights = 1.0 / self.var[lambda_slice].filled(np.inf)
# How many unmasked pixels will be averaged?
nsum = np.ma.count(self.data[lambda_slice])
fmean, wsum = np.ma.average(self.data[lambda_slice],
weights=weights, returned=True)
# The weighted average multiplied by the number of unmasked pixels.
flux = fmean * nsum
if self.var is not None:
err_flux = np.sqrt(
np.ma.sum(self.var[lambda_slice] * weights**2) /
wsum**2 * nsum**2)
else:
err_flux = np.inf
else:
flux = self.data[lambda_slice].sum()
if self.var is not None:
err_flux = np.sqrt(np.ma.sum(self.var[lambda_slice]))
else:
err_flux = np.inf
return (flux, err_flux)
def integrate(self, lmin=None, lmax=None, unit=u.angstrom):
"""Integrate the flux over a specified wavelength range.
The units of the integrated flux depend on the flux units of
the spectrum and the wavelength units, as follows:
If the flux units of the spectrum, self.unit, are something
like Q per angstrom, Q per nm, or Q per um, then the
integrated flux will have the units of Q. For example, if the
fluxes have units of 1e-20 erg/cm2/Angstrom/s, then the units
of the integration will be 1e-20 erg/cm2/s.
Alternatively, if unit is not None, then the unit of the
returned number will be the product of the units in self.unit
and unit. For example, if the flux units are counts/s, and
unit=u.angstrom, then the integrated flux will have units
counts*Angstrom/s.
Finally, if unit is None, then the units of the returned
number will be the product of self.unit and the units of the
wavelength axis of the spectrum (ie. self.wave.unit).
The result of the integration is returned as an astropy
Quantity, which holds the integrated value and its physical
units. The units of the returned number can be determined
from the .unit attribute of the return value. Alternatively
the returned value can be converted to another unit, using the
to() method of astropy quantities.
Parameters
----------
lmin : float
The minimum wavelength of the range to be integrated,
or None (the default), to select the minimum wavelength
of the first pixel of the spectrum. If this is below the
minimum wavelength of the spectrum, the integration
behaves as though the flux in the first pixel extended
down to that wavelength.
If the unit argument is None, lmin is a pixel index, and
the wavelength of the center of this pixel is used as the
lower wavelength of the integration.
lmax : float
The maximum wavelength of the range to be integrated,
or None (the default), to select the maximum wavelength
of the last pixel of the spectrum. If this is above the
maximum wavelength of the spectrum, the integration
behaves as though the flux in the last pixel extended
up to that wavelength.
If the unit argument is None, lmax is a pixel index, and
the wavelength of the center of this pixel is used as the
upper wavelength of the integration.
unit : `astropy.units.Unit`
The wavelength units of lmin and lmax, or None to indicate
that lmin and lmax are pixel indexes.
Returns
-------
out : `astropy.units.Quantity`, `astropy.units.Quantity`
The result of the integration and its error, expressed as
a floating point number with accompanying units. The integrated
value and its physical units can be extracted using the .value and
.unit attributes of the returned quantity. The value can also be
converted to different units, using the .to() method of the
            returned object.
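        Examples
        --------
        A minimal sketch, assuming ``spe`` has flux units of
        1e-20 erg/cm2/s/Angstrom::
            flux, err = spe.integrate(6500.0, 7500.0)
            print(flux.value, flux.unit)  # integrated flux, 1e-20 erg/cm2/s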
"""
# Get the index of the first pixel within the wavelength range,
# and the minimum wavelength of the integration.
if lmin is None:
i1 = 0
lmin = self.wave.coord(-0.5, unit=unit)
else:
if unit is None:
l1 = lmin
lmin = self.wave.coord(max(-0.5, l1))
else:
l1 = self.wave.pixel(lmin, False, unit)
i1 = max(0, int(l1))
# Get the index of the last pixel within the wavelength range, plus
# 1, and the maximum wavelength of the integration.
if lmax is None:
i2 = self.shape[0]
lmax = self.wave.coord(i2 - 0.5, unit=unit)
else:
if unit is None:
l2 = lmax
lmax = self.wave.coord(min(self.shape[0] - 0.5, l2))
else:
l2 = self.wave.pixel(lmax, False, unit)
i2 = min(self.shape[0], int(l2) + 1)
# Get the lower wavelength of each pixel, including one extra
# pixel at the end of the range.
d = self.wave.coord(-0.5 + np.arange(i1, i2 + 1), unit=unit)
# Change the wavelengths of the first and last pixels to
# truncate or extend those pixels to the starting and ending
# wavelengths of the spectrum.
d[0] = lmin
d[-1] = lmax
if unit is None:
unit = self.wave.unit
# Get the data of the subspectrum covered by the integration.
data = self.data[i1:i2]
# If the spectrum has been calibrated, the flux units will be
# per angstrom, per nm, per um etc. If these wavelength units
# don't match the units of the wavelength axis of the
# integration, then although the results will be correct, they
# will have inconvenient units. In such cases attempt to
# convert the units of the wavelength axis to match the flux
# units.
if unit in self.unit.bases: # The wavelength units already agree.
out_unit = self.unit * unit
else:
try:
# Attempt to determine the wavelength units of the flux density
wunit = (set(self.unit.bases) &
set([u.pm, u.angstrom, u.nm, u.um])).pop()
# Scale the wavelength axis to have the same wavelength units.
d *= unit.to(wunit)
# Get the final units of the integration.
out_unit = self.unit * wunit
# If the wavelength units of the flux weren't recognized,
# simply return the units unchanged.
except Exception:
out_unit = self.unit * unit
# Integrate the spectrum by multiplying the value of each pixel
# by the difference in wavelength from the start of that pixel to
# the start of the next pixel.
flux = (data * np.diff(d)).sum() * out_unit
if self.var is None:
err_flux = np.inf
else:
err_flux = np.sqrt((self.var[i1:i2] * np.diff(d)**2).sum())
return (flux, err_flux * out_unit)
def poly_fit(self, deg, weight=True, maxiter=0,
nsig=(-3.0, 3.0), verbose=False):
"""Perform polynomial fit on normalized spectrum and returns polynomial
coefficients.
Parameters
----------
deg : int
Polynomial degree.
weight : bool
If weight is True, the weight is computed as the inverse of
variance.
maxiter : int
Maximum allowed iterations (0)
nsig : (float,float)
The low and high rejection factor in std units (-3.0,3.0)
Returns
-------
        out : ndarray, shape (deg + 1,)
Polynomial coefficients ordered from low to high.
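        Examples
        --------
        A minimal sketch: fit a cubic to ``spe`` (an illustrative Spectrum)
        and evaluate the fitted polynomial in place on a copy::
            z = spe.poly_fit(3, maxiter=10)
            cont = spe.copy()
            cont.poly_val(z)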
"""
if self.shape[0] <= deg + 1:
raise ValueError('Too few points to perform polynomial fit')
if self._var is None:
weight = False
if weight:
vec_weight = 1.0 / np.sqrt(np.abs(self.var.filled(np.inf)))
else:
vec_weight = None
if self._mask is np.ma.nomask:
d = self._data
w = self.wave.coord()
else:
mask = ~self._mask
d = self._data[mask]
w = self.wave.coord()[mask]
if weight:
vec_weight = vec_weight[mask]
# normalize w
w0 = np.min(w)
dw = np.max(w) - w0
w = (w - w0) / dw
p = np.polynomial.polynomial.polyfit(w, d, deg, w=vec_weight)
if maxiter > 0:
err = d - np.polynomial.polynomial.polyval(w, p)
sig = np.std(err)
n_p = len(d)
for it in range(maxiter):
ind = np.where((err >= nsig[0] * sig) &
(np.abs(err) <= nsig[1] * sig))
if len(ind[0]) == n_p:
break
if len(ind[0]) <= deg + 1:
raise ValueError('Too few points to perform '
'polynomial fit')
if vec_weight is not None:
vec_weight = vec_weight[ind]
p = np.polynomial.polynomial.polyfit(w[ind], d[ind],
deg, w=vec_weight)
err = d[ind] - np.polynomial.polynomial.polyval(w[ind], p)
sig = np.std(err)
n_p = len(ind[0])
if verbose:
self._logger.info('Number of iteration: %d Std: %10.4e '
'Np: %d Frac: %4.2f', it + 1, sig, n_p,
100. * n_p / self.shape[0])
return p
def poly_val(self, z):
"""Update in place the spectrum data from polynomial coefficients.
Uses `numpy.poly1d`.
Parameters
----------
z : array
The polynomial coefficients, in increasing powers:
data = z0 + z1(lbda-min(lbda))/(max(lbda)-min(lbda)) + ...
+ zn ((lbda-min(lbda))/(max(lbda)-min(lbda)))**n
"""
l = self.wave.coord()
        w0 = np.min(l)
        dw = np.max(l) - w0
        # Evaluate the polynomial on the wavelength axis normalized in the
        # same way as in poly_fit, and update the data in place; the old
        # variances no longer apply to the replaced data.
        self.data = np.ma.masked_invalid(
            np.polynomial.polynomial.polyval((l - w0) / dw, z))
        self._var = None
import numpy as np
from scipy.spatial.distance import cdist
class Segreg(object):
def __init__(self):
self.attributeMatrix = np.matrix([]) # attributes matrix full size - all columns
self.location = [] # x and y coordinates from tract centroid (2D lists)
self.pop = [] # population of each groups by tract (2D lists)
self.pop_sum = [] # total population of the tract (sum all groups)
self.locality = [] # population intensity by groups by tract
self.n_location = 0 # length of list (n lines) (attributeMatrix.shape[0])
self.n_group = 0 # number of groups (attributeMatrix.shape[1] - 4)
self.costMatrix = [] # scipy cdist distance matrix
self.tract_id = [] # tract ids in string format
def readAttributesFile(self, filepath):
"""
        This function reads the csv file and populates the class's attributes. Data has to be exactly in the
        following format or results will be wrong:
        area id, x_coord, y_coord, attribute 1, attribute 2, attribute 3, ... attribute n
:param filepath: path with file to be read
:return: attribute Matrix [n,n]
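        Example input file (illustrative values)::
            id,x,y,group1,group2
            001,333800.25,7394500.10,120,80
            002,334150.90,7394820.55,95,110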
"""
raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=",", filling_values=0, dtype=None)
data = [list(item)[1:] for item in raw_data]
self.attributeMatrix = np.asmatrix(data)
n = self.attributeMatrix.shape[1]
self.location = self.attributeMatrix[:, 0:2]
self.location = self.location.astype('float')
self.pop = self.attributeMatrix[:, 2:n].astype('int')
# self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0
self.n_group = n-2
self.n_location = self.attributeMatrix.shape[0]
self.pop_sum = np.sum(self.pop, axis=1)
self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)
self.tract_id = self.tract_id.reshape((self.n_location, 1))
return self.attributeMatrix
def getWeight(self, distance, bandwidth, weightmethod=1):
"""
This function computes the weights for neighborhood. Default value is Gaussian(1)
:param distance: distance in meters to be considered for weighting
:param bandwidth: bandwidth in meters selected to perform neighborhood
        :param weightmethod: method to be used: 1 - gaussian, 2 - bi-square, 3 - moving window
:return: weight array for internal use
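        Example (a sketch; ``seg`` is any Segreg instance, values rounded)::
            d = np.matrix([0.0, 2500.0, 5000.0])
            w = seg.getWeight(d, bandwidth=5000, weightmethod=1)
            # gaussian weights: exp(-0.5*(d/5000)**2) ~ [1.00, 0.88, 0.61]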
"""
distance = np.asarray(distance.T)
if weightmethod == 1:
weight = np.exp((-0.5) * (distance/bandwidth) * (distance/bandwidth))
elif weightmethod == 2:
weight = (1 - (distance/bandwidth)*(distance/bandwidth)) * (1 - (distance/bandwidth)*(distance/bandwidth))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
        elif weightmethod == 3:
            # moving window: uniform weight of one inside the bandwidth
            weight = (1 + (distance * 0))
sel = np.where(distance > bandwidth)
weight[sel[0]] = 0
else:
raise Exception('Invalid weight method selected!')
return weight
def cal_timeMatrix(self, bandwidth, weightmethod, matrix):
"""
        This function calculates the local population intensity for all groups based on a time matrix.
        :param bandwidth: bandwidth for neighborhood in meters
        :param weightmethod: 1 for gaussian, 2 for bi-square and 3 for moving window
:param matrix: path/file for input time matrix
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = matrix[index, :].reshape(1, n_local)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub])) / np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localityMatrix(self, bandwidth=5000, weightmethod=1):
"""
        This function calculates the local population intensity for all groups.
        :param bandwidth: bandwidth for neighborhood in meters
        :param weightmethod: 1 for gaussian, 2 for bi-square and 3 for moving window
:return: 2d array like with population intensity for all groups
"""
n_local = self.location.shape[0]
n_subgroup = self.pop.shape[1]
locality_temp = np.empty([n_local, n_subgroup])
for index in range(0, n_local):
for index_sub in range(0, n_subgroup):
cost = cdist(self.location[index, :], self.location)
weight = self.getWeight(cost, bandwidth, weightmethod)
locality_temp[index, index_sub] = np.sum(weight * np.asarray(self.pop[:, index_sub]))/np.sum(weight)
self.locality = locality_temp
self.locality[np.where(self.locality < 0)[0], np.where(self.locality < 0)[1]] = 0
return locality_temp
def cal_localDissimilarity(self):
"""
Compute local dissimilarity for all groups.
:return: 1d array like with results for all groups, size of localities
"""
if len(self.locality) == 0:
lj = np.ravel(self.pop_sum)
tjm = np.asarray(self.pop) * 1.0 / lj[:, None]
tm = np.sum(self.pop, axis=0) * 1.0 / np.sum(self.pop)
            index_i = np.sum(np.asarray(tm) * np.asarray(1 - tm))
# Copyright 2019 RBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# evaluate.py is used to create the synthetic data generation and evaluation pipeline.
import argparse
import collections
import os
import numpy as np
import pandas as pd
from scipy.special import expit
from sklearn import preprocessing
from sklearn.ensemble import BaggingRegressor, GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import ElasticNet, Lasso, LogisticRegression, Ridge
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier, MLPRegressor
from models import dp_wgan, pate_gan, ron_gauss
from models.IMLE import imle
from models.Private_PGM import private_pgm
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument(
"--categorical", action="store_true", help="All attributes of the data are categorical with small domains"
)
parser.add_argument("--target-variable", help="Required if data has a target class")
parser.add_argument("--train-data-path", required=True)
parser.add_argument("--test-data-path", required=True)
parser.add_argument("--normalize-data", action="store_true", help="Apply sigmoid function to each value in the data")
parser.add_argument("--disable-cuda", action="store_true", help="Disable CUDA")
parser.add_argument("--downstream-task", default="classification", help="classification | regression")
privacy_parser = argparse.ArgumentParser(add_help=False)
privacy_parser.add_argument("--enable-privacy", action="store_true", help="Enable private data generation")
privacy_parser.add_argument("--target-epsilon", type=float, default=8, help="Epsilon differential privacy parameter")
privacy_parser.add_argument("--target-delta", type=float, default=1e-5, help="Delta differential privacy parameter")
privacy_parser.add_argument("--save-synthetic", action="store_true", help="Save the synthetic data into csv")
privacy_parser.add_argument("--output-data-path", help="Required if synthetic data needs to be saved")
noisy_sgd_parser = argparse.ArgumentParser(add_help=False)
noisy_sgd_parser.add_argument(
"--sigma",
type=float,
default=2,
help="Gaussian noise variance multiplier. A larger sigma will make the model "
"train for longer epochs for the same privacy budget",
)
noisy_sgd_parser.add_argument(
"--clip-coeff",
type=float,
default=0.1,
help="The coefficient to clip the gradients before adding noise for private " "SGD training",
)
noisy_sgd_parser.add_argument(
"--micro-batch-size",
type=int,
default=8,
help="Parameter to tradeoff speed vs efficiency. Gradients are averaged for a microbatch "
"and then clipped before adding noise",
)
noisy_sgd_parser.add_argument("--num-epochs", type=int, default=500)
noisy_sgd_parser.add_argument("--batch-size", type=int, default=64)
subparsers = parser.add_subparsers(help="generative model type", dest="model")
parser_pate_gan = subparsers.add_parser("pate-gan", parents=[privacy_parser])
parser_pate_gan.add_argument(
"--lap-scale",
type=float,
default=0.0001,
help="Inverse laplace noise scale multiplier. A larger lap_scale will "
"reduce the noise that is added per iteration of training.",
)
parser_pate_gan.add_argument("--batch-size", type=int, default=64)
parser_pate_gan.add_argument(
"--num-teachers", type=int, default=10, help="Number of teacher disciminators in the pate-gan model"
)
parser_pate_gan.add_argument(
"--teacher-iters", type=int, default=5, help="Teacher iterations during training per generator iteration"
)
parser_pate_gan.add_argument(
"--student-iters", type=int, default=5, help="Student iterations during training per generator iteration"
)
parser_pate_gan.add_argument(
"--num-moments", type=int, default=100, help="Number of higher moments to use for epsilon calculation for pate-gan"
)
parser_ron_gauss = subparsers.add_parser("ron-gauss", parents=[privacy_parser])
parser_pgm = subparsers.add_parser("private-pgm", parents=[privacy_parser])
parser_real_data = subparsers.add_parser("real-data")
parser_imle = subparsers.add_parser("imle", parents=[privacy_parser, noisy_sgd_parser])
parser_imle.add_argument("--decay-step", type=int, default=25)
parser_imle.add_argument("--decay-rate", type=float, default=1.0)
parser_imle.add_argument(
"--staleness", type=int, default=5, help="Number of iterations after which new synthetic samples are generated"
)
parser_imle.add_argument(
"--num-samples-factor", type=int, default=10, help="Number of synthetic samples generated per real data point"
)
parser_dp_wgan = subparsers.add_parser("dp-wgan", parents=[privacy_parser, noisy_sgd_parser])
parser_dp_wgan.add_argument("--clamp-lower", type=float, default=-0.01, help="Clamp parameter for wasserstein GAN")
parser_dp_wgan.add_argument("--clamp-upper", type=float, default=0.01, help="Clamp parameter for wasserstein GAN")
opt = parser.parse_args()
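# Example invocation (a sketch; the csv paths and target column name are
# illustrative, the flags are the ones defined above):
#   python evaluate.py --target-variable=label --train-data-path=train.csv \
#       --test-data-path=test.csv --downstream-task classification \
#       dp-wgan --enable-privacy --target-epsilon 8 --sigma 2 --num-epochs 500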
# Loading the data
train = pd.read_csv(opt.train_data_path)
test = pd.read_csv(opt.test_data_path)
data_columns = [col for col in train.columns if col != opt.target_variable]
if opt.categorical:
combined = train.append(test)
config = {}
for col in combined.columns:
col_count = len(combined[col].unique())
config[col] = col_count
class_ratios = None
if opt.downstream_task == "classification":
class_ratios = (
train[opt.target_variable].sort_values().groupby(train[opt.target_variable]).size().values / train.shape[0]
)
X_train = np.nan_to_num(train.drop([opt.target_variable], axis=1).values)
y_train = np.nan_to_num(train[opt.target_variable].values)
X_test = np.nan_to_num(test.drop([opt.target_variable], axis=1).values)
y_test = np.nan_to_num(test[opt.target_variable].values)
if opt.normalize_data:
X_train = expit(X_train)
X_test = expit(X_test)
input_dim = X_train.shape[1]
z_dim = int(input_dim / 4 + 1) if input_dim % 4 == 0 else int(input_dim / 4)
conditional = opt.downstream_task == "classification"
# Training the generative model
if opt.model == "pate-gan":
Hyperparams = collections.namedtuple(
"Hyperarams", "batch_size num_teacher_iters num_student_iters num_moments lap_scale class_ratios lr"
)
Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None)
model = pate_gan.PATE_GAN(input_dim, z_dim, opt.num_teachers, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
batch_size=opt.batch_size,
num_teacher_iters=opt.teacher_iters,
num_student_iters=opt.student_iters,
num_moments=opt.num_moments,
lap_scale=opt.lap_scale,
class_ratios=class_ratios,
lr=1e-4,
),
)
elif opt.model == "dp-wgan":
Hyperparams = collections.namedtuple(
"Hyperarams", "batch_size micro_batch_size clamp_lower clamp_upper clip_coeff sigma class_ratios lr num_epochs"
)
Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None, None, None)
model = dp_wgan.DP_WGAN(input_dim, z_dim, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
batch_size=opt.batch_size,
micro_batch_size=opt.micro_batch_size,
clamp_lower=opt.clamp_lower,
clamp_upper=opt.clamp_upper,
clip_coeff=opt.clip_coeff,
sigma=opt.sigma,
class_ratios=class_ratios,
lr=5e-5,
num_epochs=opt.num_epochs,
),
private=opt.enable_privacy,
)
elif opt.model == "ron-gauss":
model = ron_gauss.RONGauss(z_dim, opt.target_epsilon, opt.target_delta, conditional)
elif opt.model == "imle":
Hyperparams = collections.namedtuple(
"Hyperarams",
"lr batch_size micro_batch_size sigma num_epochs class_ratios clip_coeff decay_step decay_rate staleness num_samples_factor",
)
    Hyperparams.__new__.__defaults__ = (None,) * 11  # one default per field
model = imle.IMLE(input_dim, z_dim, opt.target_epsilon, opt.target_delta, conditional)
model.train(
X_train,
y_train,
Hyperparams(
lr=1e-3,
batch_size=opt.batch_size,
micro_batch_size=opt.micro_batch_size,
sigma=opt.sigma,
num_epochs=opt.num_epochs,
class_ratios=class_ratios,
clip_coeff=opt.clip_coeff,
decay_step=opt.decay_step,
decay_rate=opt.decay_rate,
staleness=opt.staleness,
num_samples_factor=opt.num_samples_factor,
),
private=opt.enable_privacy,
)
elif opt.model == "private-pgm":
if not conditional:
raise Exception("Private PGM cannot be used to generate data for regression")
model = private_pgm.Private_PGM(opt.target_variable, opt.target_epsilon, opt.target_delta)
model.train(train, config)
# Generating synthetic data from the trained model
if opt.model == "real-data":
X_syn = X_train
y_syn = y_train
elif opt.model == "ron-gauss":
if conditional:
X_syn, y_syn, dp_mean_dict = model.generate(X_train, y=y_train)
for label in np.unique(y_test):
            idx = np.where(y_test == label)
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cil.framework import DataProcessor, AcquisitionData, ImageData, DataContainer, ImageGeometry
import warnings
import numpy
from scipy import special, ndimage
class MaskGenerator(DataProcessor):
r'''
    Processor to detect outliers and return a mask with 0 where outliers were detected, and 1 for other pixels. Please use the desired static method to configure a processor for your needs.
'''
@staticmethod
def special_values(nan=True, inf=True):
r'''This creates a MaskGenerator processor which generates a mask for inf and/or nan values.
:param nan: mask NaN values
:type nan: bool, default=True
:param inf: mask INF values
:type inf: bool, default=True
'''
if nan is True:
if inf is True:
processor = MaskGenerator(mode='special_values')
else:
processor = MaskGenerator(mode='nan')
else:
if inf is True:
processor = MaskGenerator(mode='inf')
else:
raise ValueError("Please specify at least one type of value to threshold on")
return processor
@staticmethod
def threshold(min_val=None, max_val=None):
r'''This creates a MaskGenerator processor which generates a mask for values outside boundaries
:param min_val: lower boundary
:type min_val: float, default=None
:param max_val: upper boundary
:type max_val: float, default=None
'''
processor = MaskGenerator(mode='threshold', threshold_value=(min_val,max_val))
return processor
@staticmethod
def quantile(min_quantile=None, max_quantile=None):
r'''This creates a MaskGenerator processor which generates a mask for values outside boundaries
:param min_quantile: lower quantile, 0-1
:type min_quantile: float, default=None
:param max_quantile: upper quantile, 0-1
:type max_quantile: float, default=None
'''
processor = MaskGenerator(mode='quantile', quantiles=(min_quantile,max_quantile))
return processor
@staticmethod
def mean(axis=None, threshold_factor=3, window=None):
        r'''This creates a MaskGenerator processor which generates a mask for values outside a multiple of standard deviations from the mean.
abs(A - mean(A)) < threshold_factor * std(A).
:param threshold_factor: scale factor of standard-deviations to use as threshold
:type threshold_factor: float, default=3
:param axis: specify axis as int or from 'dimension_labels' to calculate mean. If no axis is specified then operates over flattened array.
:type axis: int, string
:param window: specify number of pixels to use in calculation of a rolling mean
:type window: int, default=None
'''
if window == None:
processor = MaskGenerator(mode='mean', threshold_factor=threshold_factor, axis=axis)
else:
processor = MaskGenerator(mode='movmean', threshold_factor=threshold_factor, axis=axis, window=window)
return processor
@staticmethod
def median(axis=None, threshold_factor=3, window=None):
        r'''This creates a MaskGenerator processor which generates a mask for values outside a multiple of the median absolute deviation (MAD) from the median.
abs(A - median(A)) < threshold_factor * MAD(A),
MAD = c*median(abs(A-median(A))) where c=-1/(sqrt(2)*erfcinv(3/2))
:param threshold_factor: scale factor of MAD to use as threshold
:type threshold_factor: float, default=3
:param axis: specify axis as int or from 'dimension_labels' to calculate mean. If no axis is specified then operates over flattened array.
:type axis: int, string
:param window: specify number of pixels to use in calculation of a rolling median
:type window: int, default=None
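        Example (a sketch, assuming the usual CIL DataProcessor
        set_input()/get_output() interface and an existing ``data``
        DataContainer)::
            processor = MaskGenerator.median(threshold_factor=3, window=5)
            processor.set_input(data)
            mask = processor.get_output()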
'''
if window == None:
processor = MaskGenerator(mode='median', threshold_factor=threshold_factor, axis=axis)
else:
processor = MaskGenerator(mode='movmedian', threshold_factor=threshold_factor, axis=axis, window=window)
return processor
def __init__(self,
mode='special_values',
threshold_value=(None, None),
quantiles=(None, None),
threshold_factor=3,
window=5,
axis=None):
r'''Processor to detect outliers and return mask with 0 where outliers were detected and 1 for other pixels.
:param mode: a method for detecting outliers (special_values, nan, inf, threshold, quantile, mean, median, movmean, movmedian)
:type mode: string, default='special_values'
:param threshold_value: specify lower and upper boundaries if 'threshold' mode is selected
:type threshold_value: tuple
:param quantiles: specify lower and upper quantiles if 'quantile' mode is selected
:type quantiles: tuple
        :param threshold_factor: scales the detection threshold (standard deviation in case of 'mean', 'movmean' and median absolute deviation in case of 'median', 'movmedian')
:type threshold_factor: float, default=3
:param window: specify running window if 'movmean' or 'movmedian' mode is selected
:type window: int, default=5
        :param axis: specify axis to calculate statistics for 'mean', 'median', 'movmean', 'movmedian' modes
:type axis: int, string
:return: returns a DataContainer with boolean mask with 0 where outliers were detected
:rtype: DataContainer
- special_values test element-wise for both inf and nan
- nan test element-wise for nan
- inf test element-wise for inf
- threshold test element-wise if array values are within boundaries
given by threshold_values = (float,float).
                      You can specify only the lower threshold value by setting the other to None
such as threshold_values = (float,None), then
upper boundary will be amax(data). Similarly, to specify only upper
boundary, use threshold_values = (None,float). If both threshold_values
are set to None, then original array will be returned.
- quantile test element-wise if array values are within boundaries
given by quantiles = (q1,q2), 0<=q1,q2<=1.
                      You can specify only the lower quantile value by setting the other to None
                      such as quantiles = (q1,None), then
                      upper boundary will be amax(data). Similarly, to specify only upper
                      boundary, use quantiles = (None,q2). If both quantiles
are set to None, then original array will be returned.
- mean test element-wise if
abs(A - mean(A)) < threshold_factor * std(A).
Default value of threshold_factor is 3. If no axis is specified,
then operates over flattened array. Alternatively operates along axis specified
as dimension_label.
- median test element-wise if
abs(A - median(A)) < threshold_factor * scaled MAD(A),
scaled median absolute deviation (MAD) is defined as
c*median(abs(A-median(A))) where c=-1/(sqrt(2)*erfcinv(3/2))
Default value of threshold_factor is 3. If no axis is specified,
then operates over flattened array. Alternatively operates along axis specified
as dimension_label.
- movmean the same as mean but uses rolling mean with a specified window,
default window value is 5
- movmedian the same as mean but uses rolling median with a specified window,
default window value is 5
'''
kwargs = {'mode': mode,
'threshold_value': threshold_value,
'threshold_factor': threshold_factor,
'quantiles': quantiles,
'window': window,
'axis': axis}
super(MaskGenerator, self).__init__(**kwargs)
def check_input(self, data):
if self.mode not in ['special_values', 'nan', 'inf', 'threshold', 'quantile',
'mean', 'median', 'movmean', 'movmedian']:
raise Exception("Wrong mode. One of the following is expected:\n" +
"special_values, nan, inf, threshold, \n quantile, mean, median, movmean, movmedian")
if self.axis is not None and type(self.axis) is not int:
if self.axis not in data.dimension_labels:
raise Exception("Wrong label is specified for axis. " +
"Expected {}, got {}.".format(data.dimension_labels, self.axis))
return True
def process(self, out=None):
# get input DataContainer
data = self.get_input()
try:
arr = data.as_array()
except:
arr = data
ndim = arr.ndim
try:
axis_index = data.dimension_labels.index(self.axis)
except:
if type(self.axis) == int:
axis_index = self.axis
else:
axis_index = None
        # initialise mask with all ones
        mask = numpy.ones(arr.shape, dtype=bool)
# if NaN or +/-Inf
if self.mode == 'special_values':
mask[numpy.logical_or(numpy.isnan(arr), numpy.isinf(arr))] = 0
elif self.mode == 'nan':
mask[numpy.isnan(arr)] = 0
elif self.mode == 'inf':
mask[numpy.isinf(arr)] = 0
elif self.mode == 'threshold':
if not(isinstance(self.threshold_value, tuple)):
raise Exception("Threshold value must be given as a tuple containing two values,\n" +\
"use None if no threshold value is given")
threshold = self._parse_threshold_value(arr, quantile=False)
mask[numpy.logical_or(arr < threshold[0], arr > threshold[1])] = 0
elif self.mode == 'quantile':
if not(isinstance(self.quantiles, tuple)):
raise Exception("Quantiles must be given as a tuple containing two values,\n " + \
"use None if no quantile value is given")
quantile = self._parse_threshold_value(arr, quantile=True)
mask[numpy.logical_or(arr < quantile[0], arr > quantile[1])] = 0
elif self.mode == 'mean':
# if mean along specific axis
if axis_index is not None:
tile_par = []
slice_obj = []
for i in range(ndim):
if i == axis_index:
                        # tile by the length of the collapsed axis so the
                        # reduced statistic broadcasts back to arr.shape
                        tile_par.append(arr.shape[axis_index])
slice_obj.append(numpy.newaxis)
else:
tile_par.append(1)
slice_obj.append(slice(None, None, 1))
tile_par = tuple(tile_par)
slice_obj = tuple(slice_obj)
tmp_mean = numpy.tile((numpy.mean(arr, axis=axis_index))[slice_obj], tile_par)
tmp_std = numpy.tile((numpy.std(arr, axis=axis_index))[slice_obj], tile_par)
mask[numpy.abs(arr - tmp_mean) > self.threshold_factor * tmp_std] = 0
# if global mean
else:
mask[numpy.abs(arr - numpy.mean(arr)) > self.threshold_factor * numpy.std(arr)] = 0
elif self.mode == 'median':
c = -1 / (numpy.sqrt(2) * special.erfcinv(3 / 2))
# if median along specific axis
if axis_index is not None:
tile_par = []
slice_obj = []
for i in range(ndim):
if i == axis_index:
                        # tile by the length of the collapsed axis so the
                        # reduced statistic broadcasts back to arr.shape
                        tile_par.append(arr.shape[axis_index])
slice_obj.append(numpy.newaxis)
else:
tile_par.append(1)
slice_obj.append(slice(None, None, 1))
tile_par = tuple(tile_par)
slice_obj = tuple(slice_obj)
tmp = numpy.abs(arr - numpy.tile((numpy.median(arr, axis=axis_index))[slice_obj], tile_par))
median_absolute_dev = numpy.tile((numpy.median(tmp, axis=axis_index))[slice_obj], tile_par)
mask[tmp > self.threshold_factor * c * median_absolute_dev] = 0
# if global median
else:
tmp = numpy.abs(arr - numpy.median(arr))
mask[tmp > self.threshold_factor * c * numpy.median(tmp)] = 0
elif self.mode == 'movmean':
# if movmean along specific axis
if axis_index is not None:
kernel = [1] * ndim
kernel[axis_index] = self.window
kernel = tuple(kernel)
mean_array = ndimage.generic_filter(arr, numpy.mean, size=kernel, mode='reflect')
std_array = ndimage.generic_filter(arr, numpy.std, size=kernel, mode='reflect')
mask[numpy.abs(arr - mean_array) > self.threshold_factor * std_array] = 0
# if global movmean
else:
mean_array = ndimage.generic_filter(arr, numpy.mean, size=(self.window,)*ndim, mode='reflect')
std_array = ndimage.generic_filter(arr, numpy.std, size=(self.window,)*ndim, mode='reflect')
mask[numpy.abs(arr - mean_array) > self.threshold_factor * std_array] = 0
elif self.mode == 'movmedian':
c = -1 / (numpy.sqrt(2) * special.erfcinv(3 / 2))
# if movmedian along specific axis
if axis_index is not None:
# construct filter kernel
kernel_shape = []
for i in range(ndim):
if i == axis_index:
kernel_shape.append(self.window)
else:
kernel_shape.append(1)
kernel_shape = tuple(kernel_shape)
median_array = ndimage.median_filter(arr, footprint=kernel_shape, mode='reflect')
tmp = abs(arr - median_array)
mask[tmp > self.threshold_factor * c * ndimage.median_filter(tmp, footprint=kernel_shape, mode='reflect')] = 0
# if global movmedian
else:
# construct filter kernel
kernel_shape = tuple([self.window]*ndim)
median_array = ndimage.median_filter(arr, size=kernel_shape, mode='reflect')
tmp = abs(arr - median_array)
mask[tmp > self.threshold_factor * c * ndimage.median_filter(tmp, size=kernel_shape, mode='reflect')] = 0
else:
raise ValueError('Mode not recognised. One of the following is expected: ' + \
'special_values, nan, inf, threshold, quantile, mean, median, movmean, movmedian')
if out is None:
            mask = numpy.asarray(mask, dtype=bool)
out = type(data)(mask, deep_copy=False, dtype=mask.dtype, geometry=data.geometry, suppress_warning=True, dimension_labels=data.dimension_labels)
return out
else:
out.fill(mask)
def _parse_threshold_value(self, arr, quantile=False):
lower_val = None
upper_val = None
if quantile == True:
if self.quantiles[0] is not None:
lower_val = numpy.quantile(arr, self.quantiles[0])
if self.quantiles[1] is not None:
upper_val = numpy.quantile(arr, self.quantiles[1])
else:
if self.threshold_value[0] is not None:
lower_val = self.threshold_value[0]
if self.threshold_value[1] is not None:
upper_val = self.threshold_value[1]
if lower_val is None:
            lower_val = numpy.amin(arr)
        if upper_val is None:
            upper_val = numpy.amax(arr)
        return (lower_val, upper_val)
from sklearn import datasets as ds
from DSTK.GAM.gam import GAM, ShapeFunction
from DSTK.tests.tests_gam.test_shape_function import _create_partition
from DSTK.GAM.base_gam import load_from_tar
import numpy as np
import os
import shutil
cancer_ds = ds.load_breast_cancer()
data = cancer_ds['data'][:, :20]
labels = 2 * cancer_ds['target'] - 1
assert_scores = [
[0.5538394842641805, 0.44616051573581944],
[0.49861290044203543, 0.5013870995579646],
[0.5470227126670573, 0.4529772873329428],
[0.513940794277825, 0.48605920572217504],
[0.529758125364891, 0.470241874635109]
]
test_root_folder = '/tmp/test_gam_serialization'
def teardown():
if os.path.exists(test_root_folder):
shutil.rmtree(test_root_folder)
def test_gam_training():
gam = GAM(max_depth=3, max_leaf_nodes=5, random_state=42, balancer_seed=42)
gam.train(data, labels, n_iter=5, learning_rate=0.0025, num_bags=1, num_workers=3)
for idx, vec in enumerate(data[:5, :]):
gam_scores = gam.score(vec)
np.testing.assert_almost_equal(np.sum(gam_scores), 1.0, 10)
np.testing.assert_almost_equal(gam_scores, assert_scores[idx], 10)
def test_correct_scoring():
    func1 = ShapeFunction(_create_partition(np.linspace(1, 10, 10)), np.linspace(1, 10, 11))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from itertools import repeat
import warnings
import numpy as np
import scipy.ndimage as nd
import astropy.units as u
from astropy.io import fits
from astropy.time import Time
from astropy.wcs import WCS
from astropy.stats import sigma_clipped_stats
from . import ZChecker
from sbsearch import util
from .exceptions import BadStackSet, StackIDError
def desg2file(s): return s.replace('/', '').replace(' ', '').lower()
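# e.g. desg2file('73P/Schwassmann-Wachmann 3') -> '73pschwassmann-wachmann3'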
# no data below this value is useful; used to test if reference
# subtracted image is bad
DATA_FLOOR = -100
# default colors for color correction (solar)
COLOR_DEFAULT = {
'R - i': 0.12,
'g - R': 0.39
}
class ZStack(ZChecker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger.info('ZStack')
if not os.path.exists(self.config['stack path']):
os.mkdir(self.config['stack path'])
def clean_missing(self):
count = self.db.execute('''
SELECT count() FROM ztf_stacks
WHERE stackfile IS NOT NULL
''').fetchone()[0]
self.logger.info('Checking {} files.'.format(count))
rows = self.db.iterate_over('''
SELECT stackid,stackfile FROM ztf_stacks
WHERE stackfile IS NOT NULL
''', [])
exists = 0
# use transaction to avoid affecting previous query
with self.db as con:
for stackid, fn in rows:
if os.path.exists(os.path.join(self.config['stack path'], fn)):
exists += 1
continue
self.logger.error('{} was expected, but does not exist.'
.format(fn))
con.execute('''
UPDATE ztf_cutouts SET stackid=NULL WHERE stackid=?
''', [stackid])
con.execute('''
DELETE FROM ztf_stacks WHERE stackid=?
''', [stackid])
# file is missing, but still gets moved to ztf_stale_files, so
# clean that up too:
self.clean_stale_files()
self.logger.info('{} files verified, {} database rows removed'
.format(exists, count - exists))
def stack(self, scale_by, n_baseline, objects=None, restack=False, start=None, stop=None):
data = self._data_iterator(n_baseline, objects, restack, start, stop)
for n, stackid, fn, nightlyids, nightly, baseline in data:
# file exists and overwrite mode disabled? something went wrong!
if (self._check_target_paths(self.config['stack path'], fn)
and not restack):
self.logger.error(
'Stack file exists, but was not expected. Deleted: {}'
.format(fn)
)
os.unlink(os.sep.join((self.config['stack path'], fn)))
self.logger.info('[{}] {}'.format(n, fn))
# only stack calibrated data
headers = [
fits.getheader(
os.path.join(self.config['cutout path'], f)
)
for f in sorted(nightly)
]
calibrated = [h.get('MAGZP', -1) > 0 for h in headers]
if sum(calibrated) == 0:
self.db.executemany('''
UPDATE ztf_cutouts SET stackid=NULL WHERE foundid=?
''', ((i,) for i in nightlyids))
continue
# setup FITS object, primary HDU is just a header
hdu = fits.HDUList()
primary_header = self._header(self.config['cutout path'],
nightly[calibrated])
hdu.append(fits.PrimaryHDU(header=primary_header))
# update header with baseline info
# only stack calibrated data
baseline_headers = [
fits.getheader(
os.path.join(self.config['cutout path'], f)
)
for f in baseline
]
baseline_calibrated = [h.get('MAGZP', -1) > 0 for h in baseline_headers]
baseline = baseline[baseline_calibrated]
h = self._header(self.config['cutout path'], baseline)
metadata = (
('BLPID', 'DBPID', 'Baseline processed-image IDs'),
('BLNIMAGE', 'NIMAGES', 'Number of images in baseline'),
('BLEXP', 'EXPOSURE', 'Total baseline exposure time (s)'),
            ('BLOBSJD1', 'OBSJD1', 'First baseline shutter start time'),
('BLOBSJDN', 'OBSJDN', 'Last baseline shutter start time'),
('BLOBSJDM', 'OBSJDM', 'Mean baseline shutter start time'))
for key, name, comment in metadata:
hdu[0].header[key] = h.get(name), comment
# combine nightly
rh0 = primary_header['RH']
delta0 = primary_header['DELTA']
try:
im, ref = self._combine(nightly[calibrated], 'nightly',
rh0, delta0,
self.config['cutout path'])
hdu.append(im)
if ref:
hdu.append(ref)
except BadStackSet:
continue
# loop over scaling models
for i in range(len(scale_by)):
# combine baseline
if len(baseline) > 0:
try:
im, ref = self._combine(baseline, scale_by[i],
rh0, delta0,
self.config['cutout path'])
except BadStackSet:
continue
im.name = im.name + ' BL'
hdu.append(im)
if ref:
ref.name = ref.name + ' BL'
hdu.append(ref)
# database update
if len(hdu) > 1:
# images were stacked
cursor = self.db.execute('''
INSERT OR REPLACE INTO ztf_stacks VALUES (?,?,?)
''', (stackid, fn, Time.now().iso[:-4]))
stackid = cursor.lastrowid # in case this is a new stack
# If there is a stale file, clean it before saving the
# new stack.
self.clean_stale_files()
# OK to save
hdu.writeto(os.path.join(self.config['stack path'], fn),
overwrite=restack)
self.db.executemany('''
UPDATE ztf_cutouts SET stackid=? WHERE foundid=?
''', zip(repeat(stackid), nightlyids))
else:
# images were skipped
if stackid:
# was previously stacked but some kind of error this time
self.clean_stacks([stackid])
self.logger.error(
('Unsuccessful stack {}, deleting previous data.')
.format(fn))
self.db.executemany('''
UPDATE ztf_cutouts SET stackid=NULL WHERE foundid=?
''', ((i,) for i in nightlyids))
self.db.commit()
def _data_iterator(self, n_baseline, objects, restack, start, stop):
"""Find and return images to stack."""
cmd = '''
SELECT nightid,date,objid,desg,filtercode FROM ztf_found
INNER JOIN ztf_cutouts USING (foundid)
INNER JOIN obj USING (objid)
INNER JOIN ztf_nights USING (nightid)
LEFT JOIN ztf_stacks USING (stackid)
'''
constraints = [
('sangleimg>0', None), ('maglimit>0', None)
]
if start is not None:
constraints.append(('date>=?', start))
if stop is not None:
constraints.append(('date<=?', stop))
if objects:
objids = [obj[0] for obj in self.db.resolve_objects(objects)]
q = ','.join('?' * len(objids))
object_constraint = [('objid IN ({})'.format(q), objids)]
else:
object_constraint = []
if restack:
stack_constraint = []
else:
            # only nights with images not yet stacked
stack_constraint = [('(stackfile IS NULL)', None)]
# must group by filter, otherwise photometric corrections /
# header info will fail / be wrong
cmd, parameters = util.assemble_sql(
cmd, [], constraints + stack_constraint + object_constraint)
cmd += ' GROUP BY nightid,objid,filtercode'
obs_sets = self.db.execute(cmd, parameters).fetchall()
count = len(obs_sets)
if count == 0:
self.logger.info('No sets to stack')
return
elif count == 1:
self.logger.info('1 set to stack.')
else:
self.logger.info('{} sets to stack.'.format(count))
for nightid, night, objid, desg, filt in obs_sets:
# determine nights to inspect, including baseline
jd = Time(night).jd
start_jd = jd - n_baseline
stop_jd = jd + 1
# find all data to stack, including baseline nights
cons = constraints.copy()
cons.extend([('objid=?', objid),
('obsjd >= ?', start_jd),
('obsjd <= ?', stop_jd),
('filtercode=?', filt)])
cmd, parameters = util.assemble_sql('''
SELECT stackid,foundid,obsjd,rh,rdot,archivefile,date FROM ztf_found
INNER JOIN ztf_cutouts USING (foundid)
INNER JOIN ztf_nights USING (nightid)
''', [], cons)
rows = self.db.execute(cmd, parameters).fetchall()
obsjd, rh, rdot = np.empty((3, len(rows)))
stackids, foundid = np.empty((2, len(rows)), int)
archivefiles = []
for i, row in enumerate(rows):
if row['stackid']:
stackids[i] = row['stackid']
else:
stackids[i] = -1
foundid[i] = row['foundid']
obsjd[i] = row['obsjd']
rh[i] = row['rh']
rdot[i] = row['rdot']
archivefiles.append(row['archivefile'])
archivefiles = np.array(archivefiles)
i = obsjd >= jd
baseline = archivefiles[~i]
nightly = archivefiles[i]
nightlyids = foundid[i]
# make sure the nightly cutouts haven't been used in
# different stacks
stackid = np.unique(stackids[i * (stackids >= 0)])
if len(stackid) == 0:
stackid = None
elif len(stackid) == 1:
stackid = stackid[0]
else:
msg = (
'One-to-many mapping of stackid to ztf_cutouts.foundid '
'has been violated by stackids {} and {}'
.format(stackid, row['stackid']))
raise StackIDError(msg)
fn = ('{desg}/{desg}-{date}-{prepost}{rh:.3f}-{filt}'
'-ztf-stack.fits').format(
desg=desg2file(desg),
date=night.replace('-', ''),
prepost='pre' if rdot[i].mean() < 0 else 'post',
rh=rh[i].mean(),
filt=filt)
yield count, stackid, fn, nightlyids, nightly, baseline
count -= 1
def _check_target_paths(self, path, fn):
d = os.path.dirname(os.path.join(path, fn))
if not os.path.exists(d):
os.mkdir(d)
return os.path.exists(os.path.join(path, fn))
def _header(self, path, files):
"""New FITS header based on this file list."""
headers = [fits.getheader(os.path.join(path, f))
for f in sorted(files)]
N = len(headers)
def mean_key(headers, key, comment, type):
return (np.mean([type(h[key]) for h in headers]), comment)
h = fits.Header()
h['BUNIT'] = 'e-/s'
h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'
h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'
h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'
h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'
h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'
h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'
h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'
h['OBSALT'] = 1706., 'Observatory altitude (m)'
h['IMGTYPE'] = 'object', 'Image type'
h['NIMAGES'] = N, 'Number of images in stack'
h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),
'Total stack exposure time (s)')
if len(headers) == 0:
return h
h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'
h['MAGZPRMS'] = (
np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,
'Mean MAGZP RMS')
h['PCOLOR'] = headers[0]['PCOLOR']
h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',
'Mean color coefficient', float)
h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'
h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'
h['OBSJDM'] = mean_key(
headers, 'OBSJD', 'Mean shutter start time', float)
wcsfn = sorted(files)[0]
wcs = WCS(fits.getheader(os.path.join(path, wcsfn),
extname='SANGLE'))
h.update(wcs.to_header())
h['WCSORIGN'] = wcsfn
h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),
'Database processed-image IDs')
h['DESG'] = headers[0]['DESG'], 'Target designation'
for k, comment in {
'RH': 'Mean heliocentric distance (au)',
'DELTA': 'Mean observer-target distance (au)',
'PHASE': 'Mean Sun-target-observer angle (deg)',
'RDOT': 'Mean heliocentric radial velocity, km/s',
'SELONG': 'Mean solar elongation, deg',
'SANGLE': 'Mean projected target->Sun position angle, deg',
'VANGLE': 'Mean projected velocity position angle, deg',
'TRUEANOM': 'Mean true anomaly (osculating), deg',
'TMTP': 'Mean T-Tp (osculating), days',
'TGTRA': 'Mean target RA, deg',
'TGTDEC': 'Mean target Dec, deg',
            'TGTDRA': 'Mean target RA*cos(dec) rate of change, arcsec/s',
'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',
'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',
'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',
}.items():
try:
h[k] = mean_key(headers, k, comment, float)
except ValueError:
# target rates might be empty strings
h[k] = ''
return h
def _weighted_median(self, stack, unc, axis=0):
# works, but is slow
if stack.shape[axis] == 1:
m = stack
elif stack.shape[axis] == 2:
m = np.ma.average(stack, axis=axis, weights=1/unc**2)
else:
            # this branch assumes the image index is the last axis (axis=2)
            axis = 2
weight = 1 / unc**2
wstack = weight * stack
i = np.ma.argsort(wstack, axis=2)
a = wstack[list(np.ogrid[[slice(x)
for x in wstack.shape]][:-1])+[i]]
w = weight[list(np.ogrid[[slice(x)
for x in wstack.shape]][:-1])+[i]]
c = np.ma.cumsum(a, axis=2)
c /= np.ma.max(c, axis=2)[:, :, None]
            i = np.ma.apply_along_axis(np.searchsorted, 2, c, [0.5])
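            # Assumed tail (the excerpt ends mid-function; this is a sketch,
            # not the verified original): `a / w` recovers the stack values
            # in sorted order, so the value at the 0.5-crossing index is the
            # weighted median along the last axis.
            m = np.squeeze(np.take_along_axis(a / w, i, axis=2), axis=2)
        return m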
import cv2
import time
import random
import argparse
from imutils.video import VideoStream
import numpy as np
import copy
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--webcam", type=int, default=0, help="Webcam source, if 0 does not work try changing \
to 1, external webcams might register on 1")
ap.add_argument("-d", "--difficulty", type=int, default=60, help="Control how fast circles spawn. Default 60. Increase to make game easier \
and decrease to make it harder")
args = vars(ap.parse_args())
# constants
isBgCaptured = 0
bgSubThreshold = 60
learningRate = 0
bgModel = None
def remove_background(frame, bgModel, lr):
"""
To remove background from captured region.
Parameters:
frame: Frame/Image
        bgModel: Background Subtraction Model
lr: Learning Rate for bgModel
"""
fgmask = bgModel.apply(frame, learningRate=lr)
    kernel = np.ones((2, 2), np.uint8)
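    # Plausible completion (assumed; the excerpt cuts off here): erode the
    # mask to suppress speckle noise, then keep only the foreground pixels.
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=fgmask)
    return res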
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
num_classes = 2
img_height, img_width = 64, 64
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
super(Mynet, self).__init__()
self.conv1_1 = torch.nn.Conv2d(3, 32, kernel_size=3, padding=1)
self.bn1_1 = torch.nn.BatchNorm2d(32)
self.conv1_2 = torch.nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.bn1_2 = torch.nn.BatchNorm2d(32)
self.conv2_1 = torch.nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.bn2_1 = torch.nn.BatchNorm2d(64)
self.conv2_2 = torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn2_2 = torch.nn.BatchNorm2d(64)
self.conv3_1 = torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn3_1 = torch.nn.BatchNorm2d(128)
self.conv3_2 = torch.nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn3_2 = torch.nn.BatchNorm2d(128)
self.conv4_1 = torch.nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.bn4_1 = torch.nn.BatchNorm2d(256)
self.conv4_2 = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn4_2 = torch.nn.BatchNorm2d(256)
self.fc1 = torch.nn.Linear(img_height//16 * img_width//16 * 256, 512)
#self.fc1_d = torch.nn.Dropout2d()
self.fc2 = torch.nn.Linear(512, 512)
self.fc_out = torch.nn.Linear(512, num_classes)
def forward(self, x):
x = F.relu(self.bn1_1(self.conv1_1(x)))
x = F.relu(self.bn1_2(self.conv1_2(x)))
x = F.max_pool2d(x, 2)
x = F.relu(self.bn2_1(self.conv2_1(x)))
x = F.relu(self.bn2_2(self.conv2_2(x)))
x = F.max_pool2d(x, 2)
x = F.relu(self.bn3_1(self.conv3_1(x)))
x = F.relu(self.bn3_2(self.conv3_2(x)))
x = F.max_pool2d(x, 2)
x = F.relu(self.bn4_1(self.conv4_1(x)))
x = F.relu(self.bn4_2(self.conv4_2(x)))
x = F.max_pool2d(x, 2)
x = x.view(-1, img_height//16 * img_width // 16 * 256)
x = F.relu(self.fc1(x))
#x = self.fc1_d(x)
x = F.relu(self.fc2(x))
x = self.fc_out(x)
return x
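# Quick shape check (illustrative; not part of the original script): the
# network maps a (N, 3, 64, 64) batch to (N, num_classes) logits.
#   y = Mynet()(torch.zeros(1, 3, img_height, img_width))
#   assert y.shape == (1, num_classes)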
# get train data
def data_load():
xs = np.ndarray((0, img_height, img_width, 3))
ts = np.ndarray((0))
for dir_path in glob('../../Dataset/train/images/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
xs = np.r_[xs, x[None, ...]]
t = np.zeros((1))
if 'akahara' in path:
t = np.array((0))
elif 'madara' in path:
t = np.array((1))
ts = np.r_[ts, t]
xs = xs.transpose(0,3,1,2)
return xs, ts
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = Mynet().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
model.train()
xs, ts = data_load()
# training
mb = 8
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
    np.random.shuffle(train_ind)
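    # Assumed continuation of the training loop (the excerpt cuts off here);
    # a sketch following the minibatch bookkeeping set up by `mb`/`mbi`
    # above, not the verified original.
    loss_fn = torch.nn.CrossEntropyLoss()
    for ite in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb - (len(xs) - mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi + mb]
            mbi += mb
        x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
        t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
        opt.zero_grad()
        y = model(x)
        loss = loss_fn(y, t)
        loss.backward()
        opt.step()
        acc = y.argmax(dim=1).eq(t).sum().item() / mb
        print('iter >>', ite + 1, ', loss >>', loss.item(), ', accuracy >>', acc)
    torch.save(model.state_dict(), 'cnn.pt')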
"""
Functions for testing ICE and PD calculations.
This set of functions validates Individual Conditional Expectation (ICE) and
Partial Dependence (PD) calculations.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: new BSD
import pytest
import numpy as np
import fatf.transparency.models.feature_influence as ftmfi
import fatf.utils.models as fum
from fatf.exceptions import IncompatibleModelError, IncorrectShapeError
from fatf.utils.testing.arrays import (BASE_NP_ARRAY, BASE_STRUCTURED_ARRAY,
NOT_BASE_NP_ARRAY)
# yapf: disable
ONE_D_ARRAY = np.array([0, 4, 3, 0])
NUMERICAL_NP_ARRAY_TARGET = np.array([2, 0, 1, 1, 0, 2])
NUMERICAL_NP_ARRAY = np.array([
[0, 0, 0.08, 0.69],
[1, 0, 0.03, 0.29],
[0, 1, 0.99, 0.82],
[2, 1, 0.73, 0.48],
[1, 0, 0.36, 0.89],
[0, 1, 0.07, 0.21]])
NUMERICAL_STRUCT_ARRAY = np.array(
[(0, 0, 0.08, 0.69),
(1, 0, 0.03, 0.29),
(0, 1, 0.99, 0.82),
(2, 1, 0.73, 0.48),
(1, 0, 0.36, 0.89),
(0, 1, 0.07, 0.21)],
dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])
CATEGORICAL_NP_ARRAY = np.array([
['a', 'b', 'c'],
['a', 'f', 'g'],
['b', 'c', 'c']])
CATEGORICAL_STRUCT_ARRAY = np.array(
[('a', 'b', 'c'),
('a', 'f', 'g'),
('b', 'c', 'c')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
MIXED_ARRAY = np.array(
[(0, 'a', 0.08, 'a'),
(0, 'f', 0.03, 'bb'),
(1, 'c', 0.99, 'aa'),
(1, 'a', 0.73, 'a'),
(0, 'c', 0.36, 'b'),
(1, 'f', 0.07, 'bb')],
dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')])
NUMERICAL_NP_ARRAY_TEST_INT = np.array([
[1, 0, 0, 0],
[0, 0, 0, 0]])
NUMERICAL_NP_ARRAY_TEST = np.array([
[1, 0, 0.03, 0.5],
[0, 0, 0.56, 0.32]])
NUMERICAL_STRUCT_ARRAY_TEST = np.array(
[(1, 0, 0.03, 0.5),
(0, 0, 0.56, 0.32)],
dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])
NUMERICAL_NP_ICE = np.array([
[[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.]],
[[0.0, 0., 1.0],
[0.5, 0., 0.5],
[0.5, 0., 0.5]]])
NUMERICAL_NP_PD = np.array([
[0.50, 0.0, 0.50],
[0.75, 0.0, 0.25],
[0.75, 0.0, 0.25]])
NUMERICAL_NP_ICE_CAT = np.array([
[[1., 0., 0.],
[1., 0., 0.]],
[[0.0, 0., 1.0],
[0.5, 0., 0.5]]])
NUMERICAL_NP_PD_CAT = np.array([
[0.50, 0.0, 0.50],
[0.75, 0.0, 0.25]])
NUMERICAL_NP_ICE_100 = np.array(
[100 * [[1.0, 0.0, 0.0]],
46 * [[0.0, 0.0, 1.0]] + 54 * [[0.5, 0.0, 0.5]]])
NUMERICAL_NP_PD_100 = np.array(
46 * [[0.5, 0.0, 0.5]] + 54 * [[0.75, 0.00, 0.25]])
NUMERICAL_NP_LINESPACE = np.array([0.32, 0.41, 0.5])
NUMERICAL_NP_LINESPACE_CAT = np.array([0.32, 0.5])
NUMERICAL_NP_LINESPACE_100 = np.linspace(0.32, 0.5, 100)
CATEGORICAL_NP_ARRAY_TEST = np.array([
['a', 'f', 'g'],
['b', 'f', 'c']])
CATEGORICAL_STRUCT_ARRAY_TEST = np.array(
[('a', 'f', 'g'),
('b', 'f', 'c')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
CATEGORICAL_NP_ARRAY_TARGET = np.array([0, 1, 1])
CATEGORICAL_NP_ICE = np.array([
[[0.5, 0.5],
[0.5, 0.5]],
[[0.0, 1.0],
[0.0, 1.0]]])
CATEGORICAL_NP_PD = np.array([
[0.25, 0.75],
[0.25, 0.75]])
CATEGORICAL_NP_LINESPACE = np.array(['c', 'g'])
MIXED_ARRAY_TEST = np.array(
[(0, 'a', 0.08, 'a'),
(1, 'a', 0.88, 'bb'),
(1, 'f', 0.07, 'bb')],
dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')])
MIXED_ARRAY_TARGET = np.array(['a', 'b', 'c', 'a', 'b', 'c'])
MIXED_ICE_NUMERICAL = np.array([
[[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]],
[[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5]]])
MIXED_PD_NUMERICAL = np.array([
[0.5, 0.25, 0.25],
[0.5, 0.25, 0.25],
[0.5, 0.25, 0.25]])
MIXED_LINESPACE_NUMERICAL = np.array([0, 0.5, 1])
MIXED_ICE_CATEGORICAL = np.array([
[[1.0, 0.0, 0.0],
[0.5, 0.5, 0.0]],
[[0.5, 0.0, 0.5],
[0.0, 0.5, 0.5]]])
MIXED_PD_CATEGORICAL = np.array([
[0.75, 0.0, 0.25],
[0.25, 0.5, 0.25]])
MIXED_LINESPACE_CATEGORICAL = np.array(['a', 'f'])
# yapf: enable
class InvalidModel(object):
"""
Tests for exceptions when a model lacks the ``predict_proba`` method.
"""
def __init__(self):
"""
Initialises not-a-model.
"""
pass
def fit(self, X, y):
"""
Fits not-a-model.
"""
return X, y # pragma: nocover
def predict(self, X):
"""
Predicts not-a-model.
"""
return X # pragma: nocover
def test_is_valid_input():
"""
    Tests :func:`fatf.transparency.models.feature_influence._input_is_valid`.
"""
knn_model = fum.KNN()
# Data
msg = 'The input dataset must be a 2-dimensional array.'
with pytest.raises(IncorrectShapeError) as exin:
ftmfi._input_is_valid(ONE_D_ARRAY, None, None, None, None)
assert str(exin.value) == msg
msg = ('The input dataset must only contain base types (textual and '
'numerical).')
with pytest.raises(ValueError) as exin:
ftmfi._input_is_valid(NOT_BASE_NP_ARRAY, None, None, None, None)
assert str(exin.value) == msg
# Model
msg = ('This functionality requires the model to be capable of outputting '
'probabilities via predict_proba method.')
model = InvalidModel()
with pytest.warns(UserWarning) as warning:
with pytest.raises(IncompatibleModelError) as exin:
ftmfi._input_is_valid(BASE_STRUCTURED_ARRAY, model, None, None,
None)
assert str(exin.value) == msg
assert len(warning) == 1
assert str(warning[0].message) == ('The *InvalidModel* (model) class is '
"missing 'predict_proba' method.")
# Feature index
msg = 'Provided feature index is not valid for the input dataset.'
with pytest.raises(IndexError) as exin:
ftmfi._input_is_valid(BASE_STRUCTURED_ARRAY, knn_model, 0, None, None)
assert str(exin.value) == msg
with pytest.raises(IndexError) as exin:
ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 'numerical', None,
None)
assert str(exin.value) == msg
# Steps number
msg = 'steps_number parameter has to either be None or an integer.'
with pytest.raises(TypeError) as exin:
ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 'a')
assert str(exin.value) == msg
msg = 'steps_number has to be at least 2.'
with pytest.raises(ValueError) as exin:
ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 1)
assert str(exin.value) == msg
# Treat as categorical
msg = 'treat_as_categorical has to either be None or a boolean.'
with pytest.raises(TypeError) as exin:
ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, 'a', None)
assert str(exin.value) == msg
# Functional
assert ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, None, 2)
assert ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, False, 5)
# Steps number will be ignored anyway
assert ftmfi._input_is_valid(BASE_NP_ARRAY, knn_model, 1, True, 2)
def test_interpolate_array():
"""
Tests array interpolation.
This function tests
:func:`fatf.transparency.models.feature_influence._interpolate_array`.
"""
# For a structured and an unstructured *numerical* arrays...
feature_index_num = 1
feature_index_cat = 'b'
#
num_1_min = 0
num_1_max = 1
num_1_unique = np.array([num_1_min, num_1_max])
cat_1_unique = np.array(['b', 'c', 'f'])
#
sar1 = NUMERICAL_NP_ARRAY.copy()
sar1[:, feature_index_num] = num_1_min
sar2 = NUMERICAL_NP_ARRAY.copy()
sar2[:, feature_index_num] = num_1_max
num_1_data_unique = np.stack([sar1, sar2], axis=1)
#
num_1_interpolate_3 = np.array([num_1_min, 0.5, num_1_max])
#
sar = []
for i in num_1_interpolate_3:
sar_i = NUMERICAL_NP_ARRAY.copy()
sar_i[:, feature_index_num] = i
sar.append(sar_i)
num_1_data_interpolate_3 = np.stack(sar, axis=1)
#
sar = []
for i in cat_1_unique:
sar_i = CATEGORICAL_NP_ARRAY.copy()
sar_i[:, feature_index_num] = i
sar.append(sar_i)
cat_1_interpolate = np.stack(sar, axis=1)
# ...treat a numerical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_array(
NUMERICAL_NP_ARRAY, feature_index_num, True, None)
assert np.array_equal(interpolated_data, num_1_data_unique)
assert np.array_equal(interpolated_values, num_1_unique)
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
NUMERICAL_NP_ARRAY, feature_index_num, True, 3)
assert np.array_equal(interpolated_data, num_1_data_unique)
assert np.array_equal(interpolated_values, num_1_unique)
# ...treat a numerical feature as a numerical one
# ......with default steps number (without) -- this cannot be achieved
pass
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
NUMERICAL_STRUCT_ARRAY, feature_index_cat, False, 3)
for index, column in enumerate(NUMERICAL_STRUCT_ARRAY.dtype.names):
assert np.allclose(interpolated_data[:, :][column],
num_1_data_interpolate_3[:, :, index])
assert np.array_equal(interpolated_values, num_1_interpolate_3)
# ...treat a categorical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_array(
CATEGORICAL_NP_ARRAY, feature_index_num, True, None)
assert np.array_equal(interpolated_data, cat_1_interpolate)
assert np.array_equal(interpolated_values, cat_1_unique)
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
CATEGORICAL_STRUCT_ARRAY, feature_index_cat, True, 3)
for index, column in enumerate(CATEGORICAL_STRUCT_ARRAY.dtype.names):
assert np.array_equal(interpolated_data[:, :][column],
cat_1_interpolate[:, :, index])
assert np.array_equal(interpolated_values, cat_1_unique)
# ...treat a categorical feature as a numerical one
# ......with default steps number (without)
pass
# ......with steps number
pass
###########################################################################
numerical_column = 'a'
    numerical_linespace_cat = np.array([0, 1])
sar = []
    for i in numerical_linespace_cat:
sar_i = MIXED_ARRAY.copy()
sar_i[numerical_column] = i
sar.append(sar_i)
numerical_interpolation_cat = np.stack(sar, axis=1)
#
    numerical_linespace_num = np.array([0, 0.5, 1])
sar = []
    for i in numerical_linespace_num:
# Redo the type
        dtype = [(name, numerical_linespace_num.dtype)
if name == numerical_column
else (name, MIXED_ARRAY.dtype[name])
for name in MIXED_ARRAY.dtype.names] # yapf: disable
sar_i = MIXED_ARRAY.astype(dtype)
sar_i[numerical_column] = i
sar.append(sar_i)
numerical_interpolation_num = np.stack(sar, axis=1)
categorical_column = 'b'
categorical_linespace = np.array(['a', 'c', 'f'])
sar = []
for i in categorical_linespace:
sar_i = MIXED_ARRAY.copy()
sar_i[categorical_column] = i
sar.append(sar_i)
categorical_interpolation = np.stack(sar, axis=1)
# Now for a mixed structured array -- categorical feature
# ...treat a categorical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_array(
MIXED_ARRAY, categorical_column, True, None)
assert np.array_equal(interpolated_values, categorical_linespace)
for column in MIXED_ARRAY.dtype.names:
assert np.array_equal(interpolated_data[:, :][column],
categorical_interpolation[:, :][column])
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
MIXED_ARRAY, categorical_column, True, 42)
assert np.array_equal(interpolated_values, categorical_linespace)
for column in MIXED_ARRAY.dtype.names:
assert np.array_equal(interpolated_data[:, :][column],
categorical_interpolation[:, :][column])
# Now for a mixed structured array -- numerical feature
# ...treat a numerical feature as a categorical one
# ......with default steps number (without)
interpolated_data, interpolated_values = ftmfi._interpolate_array(
MIXED_ARRAY, numerical_column, True, None)
    assert np.array_equal(interpolated_values, numerical_linespace_cat)
for column in MIXED_ARRAY.dtype.names:
assert np.array_equal(interpolated_data[:, :][column],
numerical_interpolation_cat[:, :][column])
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
MIXED_ARRAY, numerical_column, True, 3)
    assert np.array_equal(interpolated_values, numerical_linespace_cat)
for column in MIXED_ARRAY.dtype.names:
assert np.array_equal(interpolated_data[:, :][column],
numerical_interpolation_cat[:, :][column])
# ...treat a numerical feature as a numerical one
# ......with steps number
interpolated_data, interpolated_values = ftmfi._interpolate_array(
MIXED_ARRAY, numerical_column, False, 3)
    assert np.array_equal(interpolated_values, numerical_linespace_num)
for column in MIXED_ARRAY.dtype.names:
assert np.array_equal(interpolated_data[:, :][column],
numerical_interpolation_num[:, :][column])
def test_filter_rows():
"""
Tests :func:`fatf.transparency.models.feature_influence._filter_rows`.
"""
value_error = ('{} rows element {} is out of bounds. There are only {} '
'rows in the input dataset.')
type_error_include = ('The include_rows parameters must be either None or '
'a list of integers indicating which rows should be '
'included in the computation.')
type_error_include_list = 'Include rows element *{}* is not an integer.'
type_error_exclude = ('The exclude_rows parameters must be either None or '
'a list of integers indicating which rows should be '
'excluded in the computation.')
type_error_exclude_list = 'Exclude rows element *{}* is not an integer.'
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows('wrong', None, 1)
assert str(exin.value) == type_error_include
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows([0, 1, 'wrong', 4, 5], None, 7)
assert str(exin.value) == type_error_include_list.format('wrong')
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows(None, 'wrong', 1)
assert str(exin.value) == type_error_exclude
with pytest.raises(TypeError) as exin:
ftmfi._filter_rows(None, [0, 1, 'wrong', 4, 5], 7)
assert str(exin.value) == type_error_exclude_list.format('wrong')
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(None, [0, 1, 3, 5], 4)
assert str(exin.value) == value_error.format('Exclude', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(None, 5, 4)
assert str(exin.value) == value_error.format('Exclude', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows([0, 1, 3, 5], None, 4)
assert str(exin.value) == value_error.format('Include', 5, 4)
with pytest.raises(ValueError) as exin:
ftmfi._filter_rows(5, None, 4)
assert str(exin.value) == value_error.format('Include', 5, 4)
row_number = 13
row_none = None
row_digit = 3
row_list = [3, 4, 7, 12]
all_rows = list(range(13))
all_but_one = [0, 1, 2] + list(range(4, 13))
all_but_list = [0, 1, 2, 5, 6, 8, 9, 10, 11]
row_but_one = [4, 7, 12]
three = [3]
empty = []
rows = ftmfi._filter_rows(row_none, row_none, row_number)
assert np.array_equal(rows, all_rows)
rows = ftmfi._filter_rows(row_none, row_digit, row_number)
assert np.array_equal(rows, all_but_one)
rows = ftmfi._filter_rows(row_none, row_list, row_number)
assert np.array_equal(rows, all_but_list)
rows = ftmfi._filter_rows(row_none, empty, row_number)
assert np.array_equal(rows, all_rows)
rows = ftmfi._filter_rows(empty, row_none, row_number)
assert np.array_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, row_none, row_number)
assert np.array_equal(rows, three)
rows = ftmfi._filter_rows(row_digit, row_digit, row_number)
assert np.array_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, row_list, row_number)
assert np.array_equal(rows, empty)
rows = ftmfi._filter_rows(row_digit, empty, row_number)
assert np.array_equal(rows, three)
rows = ftmfi._filter_rows(empty, row_digit, row_number)
assert np.array_equal(rows, empty)
rows = ftmfi._filter_rows(row_list, row_none, row_number)
assert np.array_equal(rows, row_list)
rows = ftmfi._filter_rows(row_list, row_digit, row_number)
assert np.array_equal(rows, row_but_one)
rows = ftmfi._filter_rows(row_list, row_list, row_number)
assert np.array_equal(rows, empty)
rows = ftmfi._filter_rows(row_list, empty, row_number)
assert np.array_equal(rows, row_list)
rows = ftmfi._filter_rows(empty, row_list, row_number)
assert np.array_equal(rows, empty)
def test_merge_ice_arrays():
"""
Tests :func:`fatf.transparency.models.feature_influence.merge_ice_arrays`.
"""
type_error = ('The ice_arrays_list should be a list of numpy arrays that '
'represent Individual Conditional Expectation.')
value_error_empty = 'Cannot merge 0 arrays.'
value_error_numerical = ('The ice_array list should only contain '
'numerical arrays.')
value_error_struct = ('The ice_array list should only contain '
'unstructured arrays.')
incorrect_shape_3d = 'The ice_array should be 3-dimensional.'
value_error_shape = ('All of the ICE arrays need to be constructed for '
'the same number of classes and the same number of '
'samples for the selected feature (the second and '
'the third dimension of the ice array).')
with pytest.raises(TypeError) as exin:
ftmfi.merge_ice_arrays('string')
assert str(exin.value) == type_error
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays([])
assert str(exin.value) == value_error_empty
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays([np.array([1, 2, 'a', 4, 5])])
assert str(exin.value) == value_error_numerical
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays(
[np.array([[[4]]]),
np.array([(1, )], dtype=[('a', int)])])
assert str(exin.value) == value_error_struct
with pytest.raises(IncorrectShapeError) as exin:
ftmfi.merge_ice_arrays([np.array([[[4]]]), np.array([2])])
assert str(exin.value) == incorrect_shape_3d
arr_1 = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 9, 8]],
[[7, 6, 5, 4], [3, 2, 1, 0], [1, 2, 3, 4]]])
arr_2 = np.array([[[7, 6, 5], [3, 2, 1], [1, 2, 3]]])
arr_3 = np.array([[[7, 6, 5, 4], [3, 2, 1, 0]]])
arr_4 = np.array([[[7, 6, 5, 4], [3, 2, 1, 0]]], dtype=float)
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays([arr_1, arr_1, arr_2])
assert str(exin.value) == value_error_shape
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays([arr_1, arr_3, arr_2])
assert str(exin.value) == value_error_shape
with pytest.raises(ValueError) as exin:
ftmfi.merge_ice_arrays([arr_3, arr_3, arr_4])
assert str(exin.value) == value_error_shape
# Unstructured ICE arrays
selected_column_index = 1
smaller_numerical_array = np.array([[0, 0, 0.08, 0.69],
[1, 0, 0.03, 0.29],
[0, 1, 0.99, 0.82]]) # yapf: disable
concat = np.concatenate([NUMERICAL_NP_ARRAY, smaller_numerical_array])
arr_a = []
arr_b = []
arr_c = []
for i in range(3):
arr_i = NUMERICAL_NP_ARRAY.copy()
arr_i[:, selected_column_index] = i
arr_a.append(arr_i)
arr_i = smaller_numerical_array.copy()
arr_i[:, selected_column_index] = i
arr_b.append(arr_i)
arr_i = concat.copy()
arr_i[:, selected_column_index] = i
arr_c.append(arr_i)
unstructured_array_a = np.stack(arr_a, axis=1)
unstructured_array_b = np.stack(arr_b, axis=1)
unstructured_array_c = np.stack(arr_c, axis=1)
comp = ftmfi.merge_ice_arrays([unstructured_array_a, unstructured_array_b])
assert np.array_equal(comp, unstructured_array_c)
def test_individual_conditional_expectation():
"""
Tests Individual Conditional Expectation calculations.
Tests :func:`fatf.transparency.models.feature_influence.
individual_conditional_expectation` function.
"""
user_warning = ('Selected feature is categorical (string-base elements), '
'however the treat_as_categorical was set to False. Such '
'a combination is not possible. The feature will be '
'treated as categorical.')
steps_n_warning = ('The steps_number parameter will be ignored as the '
'feature is being treated as categorical.')
clf = fum.KNN(k=2)
clf.fit(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(NUMERICAL_STRUCT_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
# Test for type generalisation int -> float for classic arrays
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST_INT,
clf,
0,
treat_as_categorical=False,
steps_number=3)
assert np.allclose(
ice,
np.array([[[0, 0, 1], [0.5, 0, 0.5], [1, 0, 0]],
[[0, 0, 1], [0.5, 0, 0.5], [1, 0, 0]]]))
assert np.allclose(linespace, np.array([0, 0.5, 1]))
# Not structured and structured -- numerical
# ...numerical column
# ......indicate as numerical
# .........with a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=False,
steps_number=3)
assert np.allclose(ice, NUMERICAL_NP_ICE)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3, treat_as_categorical=False)
assert np.allclose(ice, NUMERICAL_NP_ICE_100)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE_100)
# ......indicate as categorical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST,
clf,
3,
treat_as_categorical=True,
steps_number=3)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert np.allclose(ice, NUMERICAL_NP_ICE_CAT)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE_CAT)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=True)
assert np.allclose(ice, NUMERICAL_NP_ICE_CAT)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE_CAT)
# ......indicate as None
# .........with a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3, steps_number=3)
assert np.allclose(ice, NUMERICAL_NP_ICE)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
NUMERICAL_NP_ARRAY_TEST, clf, 3)
assert np.allclose(ice, NUMERICAL_NP_ICE_100)
assert np.allclose(linespace, NUMERICAL_NP_LINESPACE_100)
clf = fum.KNN(k=2)
clf.fit(CATEGORICAL_NP_ARRAY, CATEGORICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(CATEGORICAL_STRUCT_ARRAY, CATEGORICAL_NP_ARRAY_TARGET)
# Not structured and structured -- categorical
# ...categorical column
# ......indicate as numerical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST,
clf_struct,
'c',
treat_as_categorical=False,
steps_number=3)
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, treat_as_categorical=False)
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# ......indicate as categorical
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST,
clf_struct,
'c',
treat_as_categorical=True,
steps_number=42)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, treat_as_categorical=True)
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# ......indicate as None
# .........with a steps number
with pytest.warns(UserWarning) as warning:
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_NP_ARRAY_TEST, clf, 2, steps_number=42)
assert len(warning) == 1
assert str(warning[0].message) == steps_n_warning
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# .........without a steps number
ice, linespace = ftmfi.individual_conditional_expectation(
CATEGORICAL_STRUCT_ARRAY_TEST, clf_struct, 'c')
assert np.allclose(ice, CATEGORICAL_NP_ICE)
assert np.array_equal(linespace, CATEGORICAL_NP_LINESPACE)
# Mixed array; include/exclude some rows
clf = fum.KNN(k=2)
clf.fit(MIXED_ARRAY, MIXED_ARRAY_TARGET)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'a', steps_number=3, exclude_rows=1)
assert np.allclose(ice, MIXED_ICE_NUMERICAL)
assert np.array_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST,
clf,
'a',
steps_number=3,
include_rows=[0, 2],
exclude_rows=[1])
assert np.allclose(ice, MIXED_ICE_NUMERICAL)
assert np.array_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST,
clf,
'a',
steps_number=3,
include_rows=[0, 2],
exclude_rows=1)
assert np.allclose(ice, MIXED_ICE_NUMERICAL)
assert np.array_equal(linespace, MIXED_LINESPACE_NUMERICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'b', exclude_rows=1)
assert np.allclose(ice, MIXED_ICE_CATEGORICAL)
assert np.array_equal(linespace, MIXED_LINESPACE_CATEGORICAL)
ice, linespace = ftmfi.individual_conditional_expectation(
MIXED_ARRAY_TEST, clf, 'b', include_rows=[0, 2], exclude_rows=1)
assert np.allclose(ice, MIXED_ICE_CATEGORICAL)
assert np.array_equal(linespace, MIXED_LINESPACE_CATEGORICAL)
def test_partial_dependence_ice():
"""
Tests Partial Dependence calculations from an ICE array.
Tests :func:`fatf.transparency.models.feature_influence.
partial_dependence_ice` function.
"""
value_error_structured = 'The ice_array should not be structured.'
value_error_not_numerical = 'The ice_array should be purely numerical.'
incorrect_shape_error = 'The ice_array should be 3-dimensional.'
with pytest.raises(ValueError) as exin:
ftmfi.partial_dependence_ice(np.array([(1, )], dtype=[('a', int)]))
assert str(exin.value) == value_error_structured
with pytest.raises(ValueError) as exin:
ftmfi.partial_dependence_ice(np.array([[1, 'a', 2]]))
assert str(exin.value) == value_error_not_numerical
with pytest.raises(IncorrectShapeError) as exin:
ftmfi.partial_dependence_ice(ONE_D_ARRAY)
assert str(exin.value) == incorrect_shape_error
# Test PD
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE)
assert np.array_equal(pd, NUMERICAL_NP_PD)
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE_CAT)
assert np.array_equal(pd, NUMERICAL_NP_PD_CAT)
pd = ftmfi.partial_dependence_ice(NUMERICAL_NP_ICE_100)
assert np.array_equal(pd, NUMERICAL_NP_PD_100)
pd = ftmfi.partial_dependence_ice(CATEGORICAL_NP_ICE)
assert np.array_equal(pd, CATEGORICAL_NP_PD)
pd = ftmfi.partial_dependence_ice(MIXED_ICE_NUMERICAL)
assert np.array_equal(pd, MIXED_PD_NUMERICAL)
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL)
assert np.array_equal(pd, MIXED_PD_CATEGORICAL)
# Test row exclusion
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, include_rows=0)
assert np.array_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, include_rows=[0])
assert np.array_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, exclude_rows=1)
assert np.array_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(MIXED_ICE_CATEGORICAL, exclude_rows=[1])
assert np.array_equal(pd, MIXED_ICE_CATEGORICAL[0])
pd = ftmfi.partial_dependence_ice(
MIXED_ICE_CATEGORICAL, include_rows=[1, 0], exclude_rows=[1])
assert np.array_equal(pd, MIXED_ICE_CATEGORICAL[0])
def test_partial_dependence():
"""
Tests Partial Dependence calculations.
Tests :func:`fatf.transparency.models.feature_influence.
partial_dependence` function.
"""
clf = fum.KNN(k=2)
clf.fit(NUMERICAL_NP_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
clf_struct = fum.KNN(k=2)
clf_struct.fit(NUMERICAL_STRUCT_ARRAY, NUMERICAL_NP_ARRAY_TARGET)
# Test PD
pd, linespace = ftmfi.partial_dependence(
NUMERICAL_STRUCT_ARRAY_TEST,
clf_struct,
'd',
treat_as_categorical=False,
steps_number=3)
    assert np.allclose(pd, NUMERICAL_NP_PD)
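    # Assumed continuation (the excerpt ends mid-test); following the pattern
    # of the assertions above, the returned linespace should match the 3-step
    # interpolation values.
    assert np.allclose(linespace, NUMERICAL_NP_LINESPACE)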
"""
Conjunto de classes para organizar os dados em formatos reconhecidos pelo banco de dados
"""
from __future__ import annotations
import numpy as np
import _pickle as pickle
import os
import mne
class Epochs:
def __init__(self, x, classe: str, subject_name: str, data_type=None) -> None:
        # Original data epoch
        self.data = x
        # Class label of this set of epochs
        self.classe = classe
        # Name of the subject this instance is associated with
        self.subject_name = subject_name
        # data_type
        self.data_type = data_type
        # Folder where this set of epochs will be stored
        self.epc_folderpath = os.path.join("subject_files", subject_name, f"epochs_{data_type}")
        # Block that mainly checks whether there is more than one epoch matrix
        try:
            self.n_trials = self.data.shape[2]
        except IndexError:
            n_ch = self.data.shape[0]
            n_samp = self.data.shape[1]
            self.n_trials = 1
            self.data = self.data.reshape(n_ch, n_samp, 1)
    # Adds an epoch to the original data set
    def add_epoch(self, new_data: Epochs):
        self.data = np.append(self.data, new_data.data, axis=2)
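    # Minimal usage sketch (illustrative; shapes are assumed to be
    # channels x samples x trials):
    #   ep = Epochs(np.zeros((8, 512)), classe="left", subject_name="S01",
    #               data_type="train")
    #   ep.add_epoch(Epochs(np.zeros((8, 512)), "left", "S01", "train"))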
import threading
import pygame
import time
import sys
import os
from pygame.locals import *
import numpy as np
from collections import deque
import torch
from torch.autograd import Variable
from Tank_AI import Linear_QNet, QTrainer
import random
FPS = 1000
SQM = 64
EAGLE_Y = []
EAGLE_G = []
BULLETS_Y_objects = []
BULLETS_Y_RECT = []
BULLETS_G_objects = []
BULLETS_G_RECT = []
BACKGROUND_RECT = []
GRASS_RECT = []
WATER_RECT = []
BRICK_RECT = []
BRICK_RECT_MANY = []
BRICK_RECT_MINI = []
SOLID_RECT = []
MAPPING = [
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH',
'HHHHSGOOOBOOSGOHH',
'HHHHGBOWBGBOOBGHH',
'HHHHOG1BGSGB2GOHH',
'HHHHGBOOBGBWOBGHH',
'HHHHOGSOOBOOOGSHH',
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH'
]
TANK_YELLOW_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_right.png'))), (52,52))]
TANK_GREEN_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_right.png'))), (52,52))]
BULLET_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_u.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_d.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_l.png'))), (22,16)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_r.png'))), (22,16))]
WATER_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_1.png'))), (64,64))
WATER_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_2.png'))), (64,64))
BRICK_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick.png'))), (64,64))
BRICK_IMG_MINI = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick_mini.png'))), (32,32))
GRASS_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_grass.png'))), (64,64))
SOLIDWALL_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_solid_wall.png'))), (64,64))
EAGLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_1.png'))), (64,64))
EAGLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_2.png'))), (64,64))
EXPLOSION_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_1.png'))), (64,64))
EXPLOSION_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_2.png'))), (64,64))
EXPLOSION_3_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_3.png'))), (64,64))
EXPLOSION_GREAT_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_1.png'))), (128,128))
EXPLOSION_GREAT_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_2.png'))), (128,128))
INVICIBLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_1.png'))), (52,52))
INVICIBLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_2.png'))), (52,52))
BACKGROUND_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'background.png'))), (64,64))
MAX_MEMORY = 100_000_000
BATCH_SIZE = 1000
LR = 0.0001
class AI_YELLOW:
def __init__(self):
self.state = []
self.gamma = 0.5
self.score = 0
self.memory = deque(maxlen=MAX_MEMORY)
self.model = Linear_QNet(24, 256, 64, 5)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
def get_state(self, a, b, c, d, e, f, g, h, i, j):
self.state = []
self.state_n = [a, b, c, d, e, f, g, h, i, j]
for n in self.state_n:
for mn in n:
self.get_state_loop(mn)
return self.state
def get_state_loop(self, m):
self.state.append(m)
def get_action(self, state, frame):
final_move = [0,0,0,0,0]
if frame > 500:
state0 = torch.tensor(state, dtype=float)
state0 = state0.double()
prediction = self.model(state0.float())
move = torch.argmax(prediction).item()
move_0 = torch.softmax(prediction, dim=-1).detach().numpy()
            x = random.choices([0,1,2,3,4], move_0)  # stochastic sample (unused; the argmax move is applied instead)
final_move[move] = 1
else:
rand = random.randint(0,4)
final_move[rand] = 1
return final_move
    def print_state(self, state, frame, score):
        if frame % 100 == 0:
            print(f'---YELLOW------frame no. {frame}--------cumulative score {score}---------')
            print(len(state))
            print(f'Position of the yellow tank relative to the green tank {state[0:4]}')
            #print(f'Position of the yellow tank relative to its own eagle {state[4:8]}')
            #print(f'Position of the yellow tank relative to the enemy eagle {state[8:12]}')
            print(f'Own tank facing direction {state[4:8]}')
            print(f'Own bullet present {state[8]}')
            print(f'Enemy bullet present {state[9]}')
            print(f'Own bullet direction {state[10:14]}')
            print(f'Enemy bullet direction {state[14:18]}')
            print(f'Tank facing objects: 1. Background - {state[18]} 2. Wall - {state[19]} 3. Own eagle - ??? 4. Enemy eagle - ??? 5. Enemy - {state[20]}')
            print(f'Is the yellow tank stuck? {state[21]}')
            print(f'Did the green tank take damage? {state[22]}')
            print(f'Did the yellow tank take damage? {state[23]}')
            #print(f'Did yellow\'s eagle take damage from yellow? {state[23]}')
            #print(f'Did green\'s eagle take damage from yellow? {state[24]}')
            print('------------------------------------------------------------')
    def train_short_memory(self, state_old, action, reward, next_state, done):
        self.trainer.train_step(state_old, action, reward, next_state, done)
    def remember(self, state_old, action, reward, next_state, done):
        self.memory.append((state_old, action, reward, next_state, done))
def final_score(self, reward):
self.score += reward
return "{0:0.2f}".format(self.score)
class AI_GREEN:
def __init__(self):
self.state = []
self.gamma = 0.5
self.score = 0
self.memory = deque(maxlen=MAX_MEMORY)
self.model = Linear_QNet(24, 256, 64, 5)
self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
def get_state(self, a, b, c, d, e, f, g, h, i, j):
self.state = []
self.state_n = [a, b, c, d, e, f, g, h, i, j]
for n in self.state_n:
for mn in n:
self.get_state_loop(mn)
return self.state
def get_state_loop(self, m):
self.state.append(m)
def get_action(self, state, frame):
final_move = [0,0,0,0,0]
if frame > 500:
state0 = torch.tensor(state, dtype=float)
state0 = state0.double()
prediction = self.model(state0.float())
move = torch.argmax(prediction).item()
move_0 = torch.softmax(prediction, dim=-1).detach().numpy()
            x = random.choices([0,1,2,3,4], move_0)  # stochastic sample (unused; the argmax move is applied instead)
final_move[move] = 1
else:
rand = random.randint(0,4)
final_move[rand] = 1
return final_move
    def print_state(self, state, frame, score):
        if frame % 100 == 0:
            print(f'---GREEN------frame no. {frame}--------cumulative score {score}---------')
            print(len(state))
            print(f'Position of the green tank relative to the yellow tank {state[0:4]}')
            #print(f'Position of the green tank relative to its own eagle {state[4:8]}')
            #print(f'Position of the green tank relative to the enemy eagle {state[8:12]}')
            print(f'Own tank facing direction {state[4:8]}')
            print(f'Own bullet present {state[8]}')
            print(f'Enemy bullet present {state[9]}')
            print(f'Own bullet direction {state[10:14]}')
            print(f'Enemy bullet direction {state[14:18]}')
            print(f'Tank facing objects: 1. Background - {state[18]} 2. Wall - {state[19]} 3. Own eagle - ??? 4. Enemy eagle - ??? 5. Enemy - {state[20]}')
            print(f'Is the green tank stuck? {state[21]}')
            print(f'Did the yellow tank take damage? {state[22]}')
            print(f'Did the green tank take damage? {state[23]}')
            #print(f'Did green\'s eagle take damage from green? {state[32]}')
            #print(f'Did yellow\'s eagle take damage from green? {state[33]}')
            print('------------------------------------------------------------')
    def train_short_memory(self, state_old, action, reward, next_state, done):
        self.trainer.train_step(state_old, action, reward, next_state, done)
    def remember(self, state_old, action, reward, next_state, done):
        self.memory.append((state_old, action, reward, next_state, done))
def final_score(self, reward):
self.score += reward
return "{0:0.2f}".format(self.score)
class On_Hit_By_Yellow:
def __init__(self, dir):
self.dir = dir
self.x_exp = 0
self.y_exp = 0
self.frame_l = 0
self.frame_h = 0
self.break_bullet_one_time_flag = True
self.allow_explosion_little = False
self.allow_explosion_hard = False
def brick_on_hit(self, i, e):
BRICK_RECT_TEMP = []
for b in BRICK_RECT_MINI:
if e.colliderect(b):
BRICK_RECT_TEMP.append(b)
if len(BRICK_RECT_TEMP) >= 1:
for x in BRICK_RECT_TEMP:
BRICK_RECT_MINI.remove(x)
self.explosion_find_location()
self.allow_explosion_hard = True
return True
return False
def solid_on_hit(self, i, e):
for b in SOLID_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.allow_explosion_little = True
return True
return False
def background_on_hit(self, i, e):
for b in BACKGROUND_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.allow_explosion_little = True
return True
return False
def green_tank_on_hit(self, i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI):
if e.colliderect(TG_MASK) and TG_INVI is False:
print('Green Tank took damage')
self.does_enemy_tank_got_hit = True
TG_CLASS.__init__()
return True
return False
def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_G:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
                print('Green\'s eagle has been destroyed')
self.does_enemy_eagle_got_hit = True
return True
return False
def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_Y:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
                print('Yellow\'s eagle has been destroyed')
                self.does_ally_eagle_got_hit = True
return True
return False
def enemys_bullet_on_hit(self, i, e):
for b in BULLETS_G_RECT:
if e.colliderect(b):
if len(BULLETS_G_RECT) >= 1:
BULLETS_G_objects.pop(i)
BULLETS_G_RECT.pop(i)
return True
return False
def break_bullet(self, i):
if self.break_bullet_one_time_flag:
BULLETS_Y_objects.pop(i)
BULLETS_Y_RECT.pop(i)
self.break_bullet_one_time_flag = False
def explosion_find_location(self):
for k in BULLETS_Y_RECT:
if self.dir == 'right':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'left':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'up':
self.x_exp = k.x - 26
self.y_exp = k.y
if self.dir == 'down':
self.x_exp = k.x - 26
self.y_exp = k.y
def draw_explosion_little(self, screen, elf):
if self.allow_explosion_little and elf:
if self.frame_l == 0:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 2:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l >= 2:
self.allow_explosion_little = False
elf = False
self.frame_l += 0
else:
self.frame_l += 1
def draw_explosion_hard(self, screen, ehf):
if self.allow_explosion_hard and ehf:
if self.frame_h <= 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 2 and self.frame_h < 4:
screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 4:
ehf = False
self.allow_explosion_hard = False
self.frame_h = 0
else:
self.frame_h += 1
class On_Hit_By_Green:
def __init__(self, dir):
self.dir = dir
self.x_exp = 0
self.y_exp = 0
self.frame_l = 0
self.frame_h = 0
self.break_bullet_one_time_flag = True
self.allow_explosion_little = False
self.allow_explosion_hard = False
def brick_on_hit(self, i, e):
BRICK_RECT_TEMP = []
for b in BRICK_RECT_MINI:
if e.colliderect(b):
BRICK_RECT_TEMP.append(b)
if len(BRICK_RECT_TEMP) >= 1:
for x in BRICK_RECT_TEMP:
BRICK_RECT_MINI.remove(x)
self.explosion_find_location()
self.allow_explosion_hard = True
return True
return False
def solid_on_hit(self, i, e):
for b in SOLID_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.allow_explosion_little = True
return True
return False
def background_on_hit(self, i, e):
for b in BACKGROUND_RECT:
if e.colliderect(b):
self.explosion_find_location()
self.allow_explosion_little = True
return True
return False
def yellow_tank_on_hit(self, i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI):
if e.colliderect(TY_MASK) and TY_INVI is False:
TY_DEST = True
TG_CLASS.__init__()
print('Yellow Tank took damage')
self.does_enemy_tank_got_hit = True
return True
return False
def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_G:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
print('Green\'s eagle has been destroyed')
self.does_ally_eagle_got_hit = True
return True
return False
def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
for b in EAGLE_Y:
if e.colliderect(b):
TG_CLASS.__init__()
TY_CLASS.__init__()
print('Yellow\'s eagle has been destroyed')
self.does_enemy_eagle_got_hit = True
return True
return False
def enemys_bullet_on_hit(self, i, e):
for b in BULLETS_Y_RECT:
if e.colliderect(b):
if len(BULLETS_Y_RECT) >= 1:
BULLETS_Y_objects.pop(i)
BULLETS_Y_RECT.pop(i)
return True
return False
def break_bullet(self, i):
if self.break_bullet_one_time_flag:
BULLETS_G_objects.pop(i)
BULLETS_G_RECT.pop(i)
self.break_bullet_one_time_flag = False
def explosion_find_location(self):
for k in BULLETS_G_RECT:
if self.dir == 'right':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'left':
self.x_exp = k.x
self.y_exp = k.y - 26
if self.dir == 'up':
self.x_exp = k.x - 26
self.y_exp = k.y
if self.dir == 'down':
self.x_exp = k.x - 26
self.y_exp = k.y
def draw_explosion_little(self, screen, elf):
if self.allow_explosion_little and elf:
if self.frame_l == 0:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 1:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_l == 2:
screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
if self.frame_l >= 2:
self.allow_explosion_little = False
elf = False
self.frame_l += 0
else:
self.frame_l += 1
def draw_explosion_hard(self, screen, ehf):
if self.allow_explosion_hard and ehf:
if self.frame_h == 0:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 1:
screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
if self.frame_h == 2:
screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
if self.frame_h >= 2:
ehf = False
self.allow_explosion_hard = False
self.frame_h = 0
else:
self.frame_h += 1
class Mapping:
def __init__(self):
self.x = 0
self.y = 0
self.frames = 0
self.convert_entities()
def convert_entities(self):
for row in MAPPING:
for col in row:
if col == 'H':
BACKGROUND_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'G':
GRASS_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'W':
WATER_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == 'B':
#BRICK_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
#BRICK_RECT_MANY.append(BRICK_IMG)
#self.convert_entities_mini()
pass
elif col == 'S':
SOLID_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '3':
EAGLE_Y.append(pygame.Rect((self.x,self.y,SQM,SQM)))
elif col == '4':
EAGLE_G.append(pygame.Rect((self.x,self.y,SQM,SQM)))
self.x+=SQM
self.y+=SQM
self.x=0
def convert_entities_mini(self):
self.x_mini = self.x
self.y_mini = self.y
for i in range(2):
for j in range(2):
BRICK_RECT_MINI.append(pygame.Rect((self.x_mini,self.y_mini,SQM/2,SQM/2)))
self.x_mini += SQM/2
self.y_mini += SQM/2
self.x_mini = self.x
def draw_props(self, screen):
for x in BACKGROUND_RECT:
#pygame.draw.rect(screen,(89, 89, 89),x)
screen.blit(BACKGROUND_IMG, (x.x,x.y))
for x in GRASS_RECT:
#pygame.draw.rect(screen,(51, 204, 51),x)
screen.blit(GRASS_IMG, (x.x,x.y))
for x in WATER_RECT:
#pygame.draw.rect(screen,(0, 153, 255),x)
if self.frames <= 30:
screen.blit(WATER_1_IMG, (x.x,x.y))
else:
screen.blit(WATER_2_IMG, (x.x,x.y))
'''
for x in BRICK_RECT:
screen.blit(BRICK_IMG, (x.x,x.y))
for x in BRICK_RECT_MINI:
screen.blit(BRICK_IMG_MINI, (x.x,x.y))
'''
for x in SOLID_RECT:
screen.blit(SOLIDWALL_IMG, (x.x,x.y))
for x in EAGLE_Y:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
for x in EAGLE_G:
screen.blit(EAGLE_1_IMG, (x.x,x.y))
self.frames += 1
if self.frames == 60:
self.frames = 0
class Bullet_TY(object):
def __init__(self,x,y,dir):
self.dir = dir
self.x = x
self.y = y
self.vel = 22
if self.dir == 'right':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'left':
self.x = x+15
self.y = y+18
self.width = 22
self.height = 16
elif self.dir == 'down':
self.x = x+18
self.y = y+15
self.width = 16
self.height = 22
elif self.dir == 'up':
self.x = x+18
self.y = y+7
self.width = 16
self.height = 22
def move(self):
if self.dir == 'right':
self.x += self.vel
elif self.dir == 'left':
self.x -= self.vel
elif self.dir == 'down':
self.y += self.vel
elif self.dir == 'up':
self.y -= self.vel
def movehitbox(self, rect):
if self.dir == 'right':
rect.x += self.vel
elif self.dir == 'left':
rect.x -= self.vel
elif self.dir == 'down':
rect.y += self.vel
elif self.dir == 'up':
rect.y -= self.vel
def draw(self, screen):
if self.dir == 'right':
self.BULLET_DRAW = BULLET_IMG[3]
elif self.dir == 'left':
self.BULLET_DRAW = BULLET_IMG[2]
elif self.dir == 'down':
self.BULLET_DRAW = BULLET_IMG[1]
elif self.dir == 'up':
self.BULLET_DRAW = BULLET_IMG[0]
screen.blit(self.BULLET_DRAW, (self.x, self.y))
class Tank_Yellow:
def __init__(self):
self.x = 0
self.y = 0
self.actions = [False, False, False, False]
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
self.tank_yellow_shoot_allow = True
self.tank_yellow_shoot_cooldown = False
self.explosion_l_flag = False
self.explosion_h_flag = False
self.yellow_tank_destroyed = False
self.yellow_tank_invicible = True
self.frames_inv = 0
self.bullet_dir = None
self.eagle_yellows_tank_on_hit_state = False
self.green_tank_on_hit_state = False
self.eagle_greens_tank_on_hit_state = False
self.AI_player = True
self.Human_player = True
for row in MAPPING:
for col in row:
if col == '1':
self.ty_pos_x = self.x
self.ty_pos_y = self.y
self.x+=SQM
self.y+=SQM
self.x=0
self.TY_mask = pygame.Rect(self.ty_pos_x, self.ty_pos_y, 52, 52)
def bind(self, event):
if event.type == KEYDOWN:
if event.key == K_d:
self.actions[0] = True
elif event.key == K_a:
self.actions[1] = True
elif event.key == K_s:
self.actions[2] = True
elif event.key == K_w:
self.actions[3] = True
if event.type == KEYUP:
if event.key == K_d:
self.actions[0] = False
elif event.key == K_a:
self.actions[1] = False
elif event.key == K_s:
self.actions[2] = False
elif event.key == K_w:
self.actions[3] = False
def move_tank(self, action):
self.movement = [0,0]
if action[0]:
self.movement[0] += 8
self.TY_face = TANK_YELLOW_IMG[3]
self.TY_face_txt = 'right'
elif action[1]:
self.movement[0] -= 8
self.TY_face = TANK_YELLOW_IMG[2]
self.TY_face_txt = 'left'
elif action[3]:
self.movement[1] -= 8
self.TY_face = TANK_YELLOW_IMG[0]
self.TY_face_txt = 'up'
elif action[2]:
self.movement[1] += 8
self.TY_face = TANK_YELLOW_IMG[1]
self.TY_face_txt = 'down'
self.TY_mask.x += self.movement[0]
self.collisions_h = self.collision_test()
for tile in self.collisions_h:
if self.movement[0] > 0:
self.TY_mask.right = tile.left
if self.movement[0] < 0:
self.TY_mask.left = tile.right
self.TY_mask.y += self.movement[1]
self.collisions_v = self.collision_test()
for tile in self.collisions_v:
if self.movement[1] > 0:
self.TY_mask.bottom = tile.top
if self.movement[1] < 0:
self.TY_mask.top = tile.bottom
self.collisions_sum = [self.collisions_h, self.collisions_v]
def collision_test(self):
colli = []
for back in BACKGROUND_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in SOLID_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in WATER_RECT:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in EAGLE_Y:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in EAGLE_G:
if self.TY_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT_MINI:
if self.TY_mask.colliderect(back):
colli.append(back)
return colli
def draw(self, screen, flag_1, flag_2):
if flag_1 is False:
screen.blit(self.TY_face,(self.TY_mask.x,self.TY_mask.y))
if flag_2:
if (self.frames_inv % 4) == 0 or (self.frames_inv % 4) == 1:
screen.blit(INVICIBLE_1_IMG,(self.TY_mask.x,self.TY_mask.y))
elif (self.frames_inv % 4) == 2 or (self.frames_inv % 4) == 3:
screen.blit(INVICIBLE_2_IMG,(self.TY_mask.x,self.TY_mask.y))
if self.frames_inv >= 45:
self.yellow_tank_invicible = False
self.frames_inv += 1
def bind_shoot(self, Flag):
if Flag:
keys = pygame.key.get_pressed()
if keys[pygame.K_r]:
flag_temp = True
self.execute_shoot(flag_temp)
def execute_shoot(self, Flag):
if Flag:
self.frames = 0
self.tank_yellow_shoot_cooldown = True
self.tank_yellow_shoot_allow = False
self.b_ty = Bullet_TY(self.TY_mask.x, self.TY_mask.y, self.TY_face_txt)
BULLETS_Y_objects.append(self.b_ty)
BULLETS_Y_RECT.append(pygame.Rect(self.b_ty.x,self.b_ty.y,self.b_ty.width,self.b_ty.height))
self.OHBY = On_Hit_By_Yellow(self.b_ty.dir)
self.bullet_dir = self.b_ty.dir
def shoot_delay(self, flag):
if flag:
if len(BULLETS_Y_RECT) == 0 and self.frames > 20:
self.tank_yellow_shoot_allow = True
self.tank_yellow_shoot_cooldown = False
self.bullet_dir = None
self.frames += 1
def bullets_onhit(self, TG_MASK, TG_CLASS, TY_CLASS, TG_DEST, TG_INVI, MAPPING, screen):
if len(BULLETS_Y_RECT) >= 1:
for i, e in enumerate(BULLETS_Y_RECT):
self.explosion_h_flag = True
self.explosion_l_flag = True
self.brick_on_hit_state = self.OHBY.brick_on_hit(i, e)
self.background_on_hit_state = self.OHBY.background_on_hit(i, e)
self.green_tank_on_hit_state = self.OHBY.green_tank_on_hit(i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI)
self.solid_on_hit_state = self.OHBY.solid_on_hit(i, e)
self.eagle_greens_tank_on_hit_state = self.OHBY.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.eagle_yellows_tank_on_hit_state = self.OHBY.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.enemys_bullet_on_hit_state = self.OHBY.enemys_bullet_on_hit(i, e)
self.states = [self.brick_on_hit_state,
self.background_on_hit_state,
self.green_tank_on_hit_state,
self.solid_on_hit_state,
self.eagle_greens_tank_on_hit_state,
self.eagle_yellows_tank_on_hit_state,
self.enemys_bullet_on_hit_state]
for xi in self.states:
if xi:
self.OHBY.break_bullet(i)
if self.explosion_l_flag or self.explosion_h_flag:
self.OHBY.draw_explosion_little(screen, self.explosion_l_flag)
self.OHBY.draw_explosion_hard(screen, self.explosion_h_flag)
def yellow_tank_position_relative_with_green_tank(self, TY_mask, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
if TY_mask.x <= TG_mask.x:
flags[0] = True
if TY_mask.x >= TG_mask.x:
flags[1] = True
if TY_mask.y >= TG_mask.y:
flags[2] = True
if TY_mask.y <= TG_mask.y:
flags[3] = True
return flags
def yellow_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_Y:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def green_eagle_position_relative_with_yellow_tank(self, TY_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_G:
if TY_mask.x <= i.x:
flags[0] = True
if TY_mask.x >= i.x:
flags[1] = True
if TY_mask.y >= i.y:
flags[2] = True
if TY_mask.y <= i.y:
flags[3] = True
return flags
def yellow_tank_direction(self):
#flags [R,L,U,D]
flags = [False, False, False, False]
if self.TY_face_txt == 'right':
flags[0] = True
elif self.TY_face_txt == 'left':
flags[1] = True
elif self.TY_face_txt == 'up':
flags[2] = True
elif self.TY_face_txt == 'down':
flags[3] = True
return flags
    def yellow_tank_bullet_presence(self):
        # A bullet is in flight exactly while shooting is disallowed
        flag = not self.tank_yellow_shoot_allow
        return [flag]
def yellow_tank_own_bullet_direction(self, dir, pres):
#flags [R,L,U,D]
flags = [False, False, False, False]
if pres:
if dir == 'right':
flags[0] = True
elif dir == 'left':
flags[1] = True
elif dir == 'up':
flags[2] = True
elif dir == 'down':
flags[3] = True
return flags
def yellow_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK, win):
self.xn = TY_MASK.x + 26
self.yn = TY_MASK.y + 26
if dir[0] is True:
for i in range(44):
self.xn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[1] is True:
for i in range(44):
self.xn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[2] is True:
for i in range(44):
self.yn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[3] is True:
for i in range(44):
self.yn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
pygame.draw.rect(win, (255, 0, 0), self.sample)
self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
#self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
def yellow_tank_faced_to_entity_loop(self, sample, entity):
self.sample = sample
for ni in entity:
if self.sample.colliderect(ni):
return True
return False
def yellow_tank_faced_to_enemy_loop(self, sample, TG_MASK):
self.sample = sample
if self.sample.colliderect(TG_MASK):
return True
return False
def yellow_tank_stuck(self, colli):
if len(colli[0]) >= 1 or len(colli[1]) >= 1:
return [True]
return [False]
def green_tank_got_hit(self, flag):
if self.green_tank_on_hit_state:
self.green_tank_on_hit_state = False
            print('Yellow tank destroyed the green tank')
return [True]
else:
return [False]
def yellow_eagle_got_hit_by_yellow(self, flag):
if self.eagle_yellows_tank_on_hit_state:
self.eagle_yellows_tank_on_hit_state = False
            print('Yellow tank destroyed its own eagle')
return [True]
else:
return [False]
def green_eagle_got_hit_by_yellow(self, flag):
if self.eagle_greens_tank_on_hit_state:
self.eagle_greens_tank_on_hit_state = False
            print("Yellow tank destroyed the opponent's eagle")
return [True]
else:
return [False]
def yellow_tank_collision_sensor(self, TY_MASK):
self.xs = TY_MASK.x - 2
self.ys = TY_MASK.y - 2
self.coli_sensor = pygame.Rect(self.xs,self.ys,56,56)
for n in SOLID_RECT:
if self.coli_sensor.colliderect(n):
return [True]
for n in WATER_RECT:
if self.coli_sensor.colliderect(n):
return [True]
for n in BACKGROUND_RECT:
if self.coli_sensor.colliderect(n):
return [True]
return [False]
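    # Hedged sketch (hypothetical method, not in the original class): the
    # boolean helpers above can be concatenated into one flat observation
    # vector for the learning agent; `TG_mask` and `colli` mirror the
    # arguments used by the existing call sites.
    def assemble_state_vector_demo(self, TG_mask, colli):
        state = []
        state += self.yellow_tank_position_relative_with_green_tank(self.TY_mask, TG_mask)
        state += self.yellow_tank_direction()
        state += self.yellow_tank_bullet_presence()
        state += self.yellow_tank_stuck(colli)
        state += self.yellow_tank_collision_sensor(self.TY_mask)
        return np.array(state, dtype=int)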
def play_step(self, action, green_tank_got_hit_by_yellow, yellow_tank_got_hit_by_green, yellow_eagle_got_hit_by_yellow, green_eagle_got_hit_by_yellow, yellow_tank_collision_sensor_state, frame_counter_idle):
self.move_it(action)
REWARD = 0
GAME_OVER = False
if yellow_tank_collision_sensor_state[0]:
REWARD = - 0.1
elif green_tank_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = 50
elif yellow_tank_got_hit_by_green[0]:
GAME_OVER = True
REWARD = -50
elif yellow_eagle_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = -150
elif green_eagle_got_hit_by_yellow[0]:
GAME_OVER = True
REWARD = 150
elif frame_counter_idle >= 1000:
REWARD = - 10
GAME_OVER = True
return REWARD, GAME_OVER
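    # Hedged usage sketch (illustrative only; `agent`, `get_state`, and the
    # flag arguments are hypothetical, not part of this file): how play_step
    # fits into a DQN-style training loop.
    #   state = get_state(tank_yellow, tank_green)
    #   action = agent.get_action(state)        # one-hot [R, L, U, D, Shoot]
    #   reward, done = tank_yellow.play_step(action, hit_g, hit_y,
    #                                        eagle_y, eagle_g, sensor, idle)
    #   agent.train_short_memory(state, action, reward, next_state, done)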
def move_it(self, action):
#[RLUDS]
self.move_tank(action)
if action[4] == 1:
self.execute_shoot(self.tank_yellow_shoot_allow)
def restart(self):
self.TY_mask.x = self.ty_pos_x
self.TY_mask.y = self.ty_pos_y
class Tank_Green:
def __init__(self):
self.x = 0
self.y = 0
self.actions = [False, False, False, False]
self.TG_face = TANK_GREEN_IMG[2]
self.TG_face_txt = 'left'
self.tank_green_shoot_allow = True
self.tank_green_shoot_cooldown = False
self.explosion_l_flag = False
self.explosion_h_flag = False
self.pos_init_find = True
self.green_tank_destroyed = False
self.green_tank_invicible = True
self.frames_inv = 0
self.bullet_dir = None
self.eagle_greens_tank_on_hit_state = False
self.yellow_tank_on_hit_state = False
self.eagle_yellows_tank_on_hit_state = False
self.AI_player = True
self.Human_player = True
for row in MAPPING:
for col in row:
if col == '2':
self.tg_pos_x = self.x
self.tg_pos_y = self.y
self.x+=SQM
self.y+=SQM
self.x=0
self.TG_mask = pygame.Rect(self.tg_pos_x, self.tg_pos_y, 52, 52)
def bind(self, event):
if event.type == KEYDOWN:
if event.key == K_d:
self.actions[0] = True
elif event.key == K_a:
self.actions[1] = True
elif event.key == K_s:
self.actions[2] = True
elif event.key == K_w:
self.actions[3] = True
if event.type == KEYUP:
if event.key == K_d:
self.actions[0] = False
elif event.key == K_a:
self.actions[1] = False
elif event.key == K_s:
self.actions[2] = False
elif event.key == K_w:
self.actions[3] = False
def move_tank(self, action):
self.movement = [0,0]
if action[0]:
self.movement[0] += 8
self.TG_face = TANK_GREEN_IMG[3]
self.TG_face_txt = 'right'
elif action[1]:
self.movement[0] -= 8
self.TG_face = TANK_GREEN_IMG[2]
self.TG_face_txt = 'left'
elif action[3]:
self.movement[1] -= 8
self.TG_face = TANK_GREEN_IMG[0]
self.TG_face_txt = 'up'
elif action[2]:
self.movement[1] += 8
self.TG_face = TANK_GREEN_IMG[1]
self.TG_face_txt = 'down'
self.TG_mask.x += self.movement[0]
self.collisions_h = self.collision_test()
for tile in self.collisions_h:
if self.movement[0] > 0:
self.TG_mask.right = tile.left
if self.movement[0] < 0:
self.TG_mask.left = tile.right
self.TG_mask.y += self.movement[1]
self.collisions_v = self.collision_test()
for tile in self.collisions_v:
if self.movement[1] > 0:
self.TG_mask.bottom = tile.top
if self.movement[1] < 0:
self.TG_mask.top = tile.bottom
self.collisions_sum = [self.collisions_h, self.collisions_v]
def collision_test(self):
colli = []
for back in BACKGROUND_RECT:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in SOLID_RECT:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in WATER_RECT:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in EAGLE_Y:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in EAGLE_G:
if self.TG_mask.colliderect(back):
colli.append(back)
for back in BRICK_RECT_MINI:
if self.TG_mask.colliderect(back):
colli.append(back)
return colli
def bind_shoot(self, Flag):
if Flag:
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
flag_temp = True
self.execute_shoot(flag_temp)
def execute_shoot(self, Flag):
if Flag:
self.frames = 0
self.tank_green_shoot_cooldown = True
self.tank_green_shoot_allow = False
self.b_tg = Bullet_TY(self.TG_mask.x, self.TG_mask.y, self.TG_face_txt)
BULLETS_G_objects.append(self.b_tg)
BULLETS_G_RECT.append(pygame.Rect(self.b_tg.x,self.b_tg.y,self.b_tg.width,self.b_tg.height))
self.OHBG = On_Hit_By_Green(self.b_tg.dir)
self.bullet_dir = self.b_tg.dir
def shoot_delay(self, flag):
if flag:
if len(BULLETS_G_RECT) == 0 and self.frames > 20:
self.tank_green_shoot_allow = True
self.tank_green_shoot_cooldown = False
self.bullet_dir = None
self.frames += 1
def bullets_onhit(self, TY_MASK, TG_CLASS, TY_CLASS, TY_DEST, TY_INVI, MAPPING,screen):
if len(BULLETS_G_RECT) >= 1:
for i, e in enumerate(BULLETS_G_RECT):
self.explosion_l_flag = True
self.explosion_h_flag = True
self.brick_on_hit_state = self.OHBG.brick_on_hit(i, e)
self.background_on_hit_state = self.OHBG.background_on_hit(i, e)
self.yellow_tank_on_hit_state = self.OHBG.yellow_tank_on_hit(i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI)
self.solid_on_hit_state = self.OHBG.solid_on_hit(i, e)
self.eagle_greens_tank_on_hit_state = self.OHBG.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.eagle_yellows_tank_on_hit_state = self.OHBG.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
self.enemys_bullet_on_hit_state = self.OHBG.enemys_bullet_on_hit(i, e)
self.states = [self.brick_on_hit_state,
self.background_on_hit_state,
self.yellow_tank_on_hit_state,
self.solid_on_hit_state,
self.eagle_greens_tank_on_hit_state,
self.eagle_yellows_tank_on_hit_state,
self.enemys_bullet_on_hit_state]
for xi in self.states:
if xi:
self.OHBG.break_bullet(i)
if self.explosion_l_flag or self.explosion_h_flag:
self.OHBG.draw_explosion_little(screen, self.explosion_l_flag)
self.OHBG.draw_explosion_hard(screen, self.explosion_h_flag)
def draw(self, screen, flag_1, flag_2):
if flag_1 is False:
screen.blit(self.TG_face,(self.TG_mask.x,self.TG_mask.y))
if flag_2:
if (self.frames_inv % 4) == 0 or (self.frames_inv % 4) == 1:
screen.blit(INVICIBLE_1_IMG,(self.TG_mask.x,self.TG_mask.y))
elif (self.frames_inv % 4) == 2 or (self.frames_inv % 4) == 3:
screen.blit(INVICIBLE_2_IMG,(self.TG_mask.x,self.TG_mask.y))
if self.frames_inv >= 45:
self.green_tank_invicible = False
self.frames_inv += 1
def green_tank_position_relative_with_yellow_tank(self, TY_mask, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
if TG_mask.x <= TY_mask.x:
flags[0] = True
if TG_mask.x >= TY_mask.x:
flags[1] = True
if TG_mask.y >= TY_mask.y:
flags[2] = True
if TG_mask.y <= TY_mask.y:
flags[3] = True
return flags
def green_eagle_position_relative_with_green_tank(self, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
for i in EAGLE_G:
if TG_mask.x <= i.x:
flags[0] = True
if TG_mask.x >= i.x:
flags[1] = True
if TG_mask.y >= i.y:
flags[2] = True
if TG_mask.y <= i.y:
flags[3] = True
return flags
def yellow_eagle_position_relative_with_green_tank(self, TG_mask):
#flags [R,L,U,D]
flags = [False, False, False, False]
        for i in EAGLE_Y:
if TG_mask.x <= i.x:
flags[0] = True
if TG_mask.x >= i.x:
flags[1] = True
if TG_mask.y >= i.y:
flags[2] = True
if TG_mask.y <= i.y:
flags[3] = True
return flags
def green_tank_direction(self):
#flags [R,L,U,D]
flags = [False, False, False, False]
if self.TG_face_txt == 'right':
flags[0] = True
elif self.TG_face_txt == 'left':
flags[1] = True
elif self.TG_face_txt == 'up':
flags[2] = True
elif self.TG_face_txt == 'down':
flags[3] = True
return flags
    def green_tank_bullet_presence(self):
        # A bullet is in flight exactly while shooting is disallowed
        flag = not self.tank_green_shoot_allow
        return [flag]
def green_tank_own_bullet_direction(self, dir, pres):
#flags [R,L,U,D]
flags = [False, False, False, False]
if pres:
if dir == 'right':
flags[0] = True
elif dir == 'left':
flags[1] = True
elif dir == 'up':
flags[2] = True
elif dir == 'down':
flags[3] = True
return flags
def green_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK):
self.xn = TG_MASK.x + 26
self.yn = TG_MASK.y + 26
if dir[0] is True:
for i in range(44):
self.xn += 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.green_tank_faced_to_entity_loop(self.sample, EAGLE_G)
#self.loop_logic_enemys_eagle = self.green_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
self.logic_array_single = np.where(self.logic_array == True)
if len(self.logic_array_single[0]) >= 1:
return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
if dir[1] is True:
for i in range(44):
self.xn -= 16
self.sample = pygame.Rect(self.xn,self.yn,1,1)
self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
#self.loop_logic_own_eagle= self.green_tank_faced_to_entity_loop(self.sample, EAGLE_G)
#self.loop_logic_enemys_eagle = self.green_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
from tvm import relay
import tvm
from collage.pattern_manager.utils import is_function_node
from collage.pattern_manager.cost_func import *
from collage.optimizer.custom_fusion_pass import CustomFusionPass
from workloads.torch_workloads import get_network_from_torch
from workloads.relay_workloads import get_network_from_relay
from tvm.contrib import graph_executor as runtime
import numpy as np
import argparse
from tvm import autotvm, auto_scheduler
from collage.utility.debug_helper import *
from workloads.torch_workloads import *
from e2e_perf_logger import *
import time
import os
from collage.pattern_manager.pattern_registry import PatternRegistry
from collage.utility.visualize import visualize_network
from collage.optimizer.custom_fusion_pass import get_opt_info_tag
def setup_attrs_ours(net, net_name, hw_name, batch_size):
net = net.with_attr(NETWORK_FUNC_ATTR, net_name)
net = net.with_attr(HW_FUNC_ATTR, hw_name)
net = net.with_attr(BATCH_SIZE_ATTR, batch_size)
return net
# Setup attribute for CuDNN backend baseline
def setup_attrs_single_backend_baseline(net, net_name, hw_name, batch_size, single_backend_id):
net = net.with_attr("CustomFusionPass", CustomFusionPass.SINGLE_BACKEND_BASELINE)
net = net.with_attr(NETWORK_FUNC_ATTR, net_name)
net = net.with_attr(HW_FUNC_ATTR, hw_name)
net = net.with_attr(BATCH_SIZE_ATTR, batch_size)
net = net.with_attr(SINGLE_BACKEND_ATTR, single_backend_id)
return net
def measure_end_to_end_perf_tensorrt(mod, params, target_str, shape_dict, hw_name):
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
mod, config = partition_for_tensorrt(mod, params)
# Debug to check if TRT supports ops of interest
net_expr = mod["main"]
print(f"After partition : {net_expr}")
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get(), config={'relay.ext.tensorrt.options': config}):
lib = relay.build(mod, target=target_str, params=params)
lib.export_library('compiled_tensorrt.so')
# Debugging BERT-FULL
from tvm.relay.transform.utility.visualize import visualize_network
visualize_network(mod["main"], "o3_bertfull_trt_debug")
dev = tvm.gpu(0)
loaded_lib = tvm.runtime.load_module('compiled_tensorrt.so')
module = tvm.contrib.graph_executor.GraphModule(loaded_lib['default'](dev))
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
def measure_end_to_end_perf_dnnl(mod, params, target_str, shape_dict, hw_name, args):
if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
raise Exception("skip because DNNL codegen is not available")
opt_pass = tvm.transform.Sequential(
[
tvm.relay.transform.InferType(),
tvm.relay.transform.SimplifyInference(),
tvm.relay.transform.FoldConstant(),
tvm.relay.transform.FoldScaleAxis(),
tvm.relay.transform.AnnotateTarget("dnnl"),
tvm.relay.transform.MergeCompilerRegions(),
tvm.relay.transform.PartitionGraph(),
]
)
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get(), disabled_pass=["AlterOpLayout"]):
mod = opt_pass(mod)
# Debug: visualize IR
# opt_info_tag = get_opt_info_tag(args.network, hw_name, args.batch_size)
# visualize_network(mod["main"], f"{opt_info_tag}_dnnl")
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get()):
lib = relay.build(mod, target=target_str, params=params)
dev = tvm.cpu(0)
kwargs = {}
lib.export_library('compiled_dnnl.so', fcompile=False, **kwargs)
loaded_lib = tvm.runtime.load_module('compiled_dnnl.so')
module = tvm.contrib.graph_executor.GraphModule(loaded_lib['default'](dev))
#module = tvm.contrib.graph_executor.create(json, lib, dev)
assert(module is not None)
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
# No AlterOpLayout
def build_and_measure_autotvm_without_alter_layout(net, params, target_str, shape_dict, hw_name):
# else:
with autotvm.apply_history_best(get_autotvm_log_path(hw_name)):
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get(), disabled_pass=["AlterOpLayout"]):
lib = relay.build(net, target_str, params=params)
logging.info(f"We successfully built the network")
# Create workload
dev = tvm.device(target_str, 0)
module = runtime.GraphModule(lib["default"](dev))
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
def build_and_measure_autotvm(net, params, target_str, shape_dict, hw_name):
# else:
with autotvm.apply_history_best(get_autotvm_log_path(hw_name)):
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get()):
lib = relay.build(net, target_str, params=params)
logging.info(f"We successfully built the network")
# Create workload
dev = tvm.device(target_str, 0)
module = runtime.GraphModule(lib["default"](dev))
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
def measure_end_to_end_tvm_no_tuning(net, params, target_str, shape_dict, method_mode, net_name, hw_name, batch_size):
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get()):
lib = relay.build(net, target_str, params=params)
logging.info(f"We successfully built the network")
# Create workload
dev = tvm.device(target_str, 0)
module = runtime.GraphModule(lib["default"](dev))
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
def measure_end_to_end_perf_autotvm(net, params, target_str, shape_dict, method_mode, net_name, hw_name, batch_size):
assert is_function_node(net)
if method_mode is not None:
net = net.with_attr("CustomFusionPass", method_mode)
net = setup_attrs_ours(net, net_name, hw_name, batch_size)
return build_and_measure_autotvm(net, params, target_str, shape_dict, hw_name)
def measure_end_to_end_perf_single_backend_without_alter_layout(net, params, target_str, shape_dict, net_name, hw_name, batch_size, backend_id):
assert is_function_node(net)
net = setup_attrs_single_backend_baseline(net, net_name, hw_name, batch_size, backend_id)
return build_and_measure_autotvm_without_alter_layout(net, params, target_str, shape_dict, hw_name)
def measure_end_to_end_perf_single_backend(net, params, target_str, shape_dict, net_name, hw_name, batch_size, backend_id):
assert is_function_node(net)
net = setup_attrs_single_backend_baseline(net, net_name, hw_name, batch_size, backend_id)
return build_and_measure_autotvm(net, params, target_str, shape_dict, hw_name)
def measure_end_to_end_perf_autosch(net, params, target_str, shape_dict, is_ours, hw_name):
assert is_function_node(net)
if is_ours:
net = net.with_attr("CustomFusionPass", CustomFusionPass.DP)
with auto_scheduler.ApplyHistoryBest(AUTOSCH_LOG):
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get()):
lib = relay.build(net, target_str, params=params)
# Create workload
dev = tvm.device(target_str, 0)
module = runtime.GraphModule(lib["default"](dev))
# Setup execution
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
module.set_input(input_name, input_data)
ftimer = module.module.time_evaluator("run", dev, number=NUM_MEASUREMENTS_PER_REPEAT_E2E, repeat=NUM_REPEATS_E2E)
mean_perf, std_perf = measure(ftimer, True, hw_name)
return mean_perf, std_perf, module
def verify_network_output(net, shape_dict, mod_tvm, mod_ours):
assert is_function_node(net)
# Create same input data for two networks
name_to_data = {}
for input_name, input_shape in shape_dict.items():
input_data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
name_to_data[input_name] = input_data
# Setup execution
for input_name, input_data in name_to_data.items():
mod_tvm.set_input(input_name, input_data)
mod_tvm.run()
out_tvm = mod_tvm.get_output(0).asnumpy()
# Setup execution
for input_name, input_data in name_to_data.items():
mod_ours.set_input(input_name, input_data)
mod_ours.run()
out_ours = mod_ours.get_output(0).asnumpy()
TOL = 1e-01
print("First 10 outputs")
print(f"TVM : {out_tvm.flatten()[:10]}")
# print(f"AutoTVM: {out_tvm.flatten()[:10]}")
print(f"Ours : {out_ours.flatten()[:10]}")
assert np.allclose(out_tvm, out_ours, rtol=TOL, atol=TOL)
print(f"Passed the verification of output test")
print(f"Worst diffence : {np.abs((out_ours - out_tvm)).max():.4f}")
def args_checker(args, parser):
is_missing_arg = not args.network
is_missing_arg |= not args.hw
# is_missing_arg |= not args.batch_size
# is_missing_arg |= not args.target
# is_missing_arg |= not args.dtype
if is_missing_arg:
parser.error('Make sure you input all arguments')
def get_args():
parser = argparse.ArgumentParser()
# Default type is string for argparse
parser.add_argument("-n", "--network", help="name of a neural network")
parser.add_argument("-hw", "--hw", help="target hardware")
parser.add_argument("-bs", "--batch-size", default=1, type=int, help="batch size")
# parser.add_argument("-t", "--target", help="target device")
# parser.add_argument("-dt", "--dtype", help="data type")
args = parser.parse_args()
args_checker(args, parser)
return args
def log_e2e_perf(args, method, mean_perf, std_perf, is_perf_logging):
if is_perf_logging:
E2EPerfLogger().log_perf(args.hw, args.batch_size, args.network, method, mean_perf, std_perf)
def measure_single_backend_debug(mod, params, shape_dict, args, is_perf_logging, single_backend):
mean_perf, std_perf, mod_cud = measure_end_to_end_perf_single_backend(mod["main"], params, args.target, shape_dict,
args.network, args.hw, args.batch_size,
single_backend.id())
single_backend_name = single_backend.name()
print(f"[{args.network}] Performance of {single_backend_name} on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
def measure_dp_and_baselines(mod, params, shape_dict, args, is_perf_logging):
mean_perf, std_perf, mod_dp = measure_end_to_end_perf_autotvm(mod["main"], params, args.target, shape_dict,
CustomFusionPass.DP,
args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of DP on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'DP', mean_perf, std_perf, is_perf_logging)
mean_perf, std_perf, mod_tvm = measure_end_to_end_perf_autotvm(mod["main"], params, args.target, shape_dict,
None,
args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of AutoTVM on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'AutoTVM', mean_perf, std_perf, is_perf_logging)
if args.hw in NVIDIA_GPUS:
mean_perf, std_perf, mod_trt = measure_end_to_end_perf_tensorrt(mod, params, args.target, shape_dict, args.hw)
print(f"[{args.network}] Performance of TensorRT on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'TensorRT', mean_perf, std_perf, is_perf_logging)
#mean_perf, std_perf, mod_cud = measure_end_to_end_perf_single_backend(mod["main"], params, args.target, shape_dict,
# args.network, args.hw, args.batch_size,
# Target.CUDNN.id())
#print(f"[{args.network}] Performance of cuDNN on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
#log_e2e_perf(args, 'cuDNN', mean_perf, std_perf, is_perf_logging)
# mean_perf, std_perf = measure_end_to_end_perf_autosch(mod["main"], params, 'cuda', shape_dict, False, args.hw)
# print(f"[AutoSCH] Performance of {args.network} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
elif args.hw in INTEL_CPUS:
mean_perf, std_perf, mod_dnnl = measure_end_to_end_perf_dnnl(mod, params, args.target, shape_dict, args.hw, args)
print(f"[{args.network}] Performance of DNNL on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'DNNL', mean_perf, std_perf, is_perf_logging)
else:
raise Exception(f"{args.hw} is unexpected hw, we need to set default backends for this hw.")
#verify_network_output(mod["main"], shape_dict, mod_tvm, mod_trt)
#verify_network_output(mod["main"], shape_dict, mod_tvm, mod_dp)
def measure_two_level(mod, params, shape_dict, args, is_perf_logging):
mean_perf, std_perf, mod_two_level = measure_end_to_end_perf_autotvm(mod["main"], params, args.target, shape_dict,
CustomFusionPass.TWO_LEVEL_OPT,
args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of Two-level opt on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'Two-level', mean_perf, std_perf, is_perf_logging)
mean_perf, std_perf, mod_tvm = measure_end_to_end_perf_autotvm(mod["main"], params, args.target, shape_dict,
None,
args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of AutoTVM on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'AutoTVM', mean_perf, std_perf, is_perf_logging)
verify_network_output(mod["main"], shape_dict, mod_tvm, mod_two_level)
def measure_tvm_strategy_libs(mod, params, lib_target, shape_dict, args, is_perf_logging):
mean_perf, std_perf, mod_tvm2 = measure_end_to_end_tvm_no_tuning(mod["main"], params, args.target, shape_dict,
None, args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of TVM (no tuning) on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
mean_perf, std_perf, mod_tvm1 = measure_end_to_end_tvm_no_tuning(mod["main"], params, lib_target, shape_dict,
None, args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of TVM (no tuning, with vendor libs) on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'AutoTVM-libs', mean_perf, std_perf, is_perf_logging)
verify_network_output(mod["main"], shape_dict, mod_tvm1, mod_tvm2)
def measure_autotvm(mod, params, shape_dict, args, is_perf_logging):
# For debugging and visualization
# mod["main"] = mod["main"].with_attr(NETWORK_FUNC_ATTR, args.network)
mean_perf, std_perf, mod_tvm = measure_end_to_end_perf_autotvm(mod["main"], params, args.target, shape_dict,
None,
args.network, args.hw, args.batch_size)
print(f"[{args.network}] Performance of AutoTVM on {args.hw} (mean, std) = ({mean_perf:.4f}+-{std_perf:.4f})")
log_e2e_perf(args, 'AutoTVM', mean_perf, std_perf, is_perf_logging)
def build_dp(net, params, target_str, shape_dict, net_name, hw_name, batch_size):
net = net.with_attr("CustomFusionPass", CustomFusionPass.DP)
net = setup_attrs_ours(net, net_name, hw_name, batch_size)
with autotvm.apply_history_best(get_autotvm_log_path(hw_name)):
with tvm.transform.PassContext(opt_level=OPT_LEVEL.get()):
lib = relay.build(net, target_str, params=params)
logging.info(f"We successfully built the network")
"""
Measure time spent for DP algorithm (dp) and op measurement (measurement)
"""
def measure_dp_tuning_time(mod, params, shape_dict, args, is_perf_logging):
n_trial = 1
dp_time_arr = []
measurement_time_arr = []
for i in range(n_trial):
# Delete operator_cost log
print("Delete operator cost for measurement")
this_code_path = os.path.dirname(os.path.abspath(__file__))
os.system(f"rm {this_code_path}/../../python/tvm/relay/transform/logs/operator_cost_{args.hw}.*")
# Measure dp + measurement time
start_time = time.time()
build_dp(mod["main"], params, args.target, shape_dict, args.network, args.hw, args.batch_size)
dp_and_measurement_time = time.time() - start_time
print(f"[{args.network}] Elapsed time of DP + Measurement on {args.hw} = {dp_and_measurement_time:.4f}s")
# Measure DP time
start_time = time.time()
build_dp(mod["main"], params, args.target, shape_dict, args.network, args.hw, args.batch_size)
dp_time = time.time() - start_time
print(f"[{args.network}] Elapsed time of DP on {args.hw} = {dp_time:.4f}s")
# Get measurement time
measurement_time = dp_and_measurement_time - dp_time
print(f"[{args.network}] Elapsed time of Measurement on {args.hw} = {measurement_time:.4f}s")
dp_time_arr.append(dp_time)
measurement_time_arr.append(measurement_time)
if is_perf_logging:
        DPTuningTimeLogger().log_perf(args.hw, args.network, "DP", np.mean(dp_time_arr))
#!/usr/bin/env python
__all__ = ['sun', 'earth', 'moon', 'jupiter', 'true_anomaly', 'eci2perif',
'elem2rv', 'rv2elem', 'tle2elem', 'calc_atmospheric_density', 'T',
'timedelta']
import os
import pathlib
import datetime
from datetime import timedelta
from numpy import sin, cos, arctan2, arccos, deg2rad, rad2deg, pi
from numpy import log, exp, sqrt, array, transpose, cross, dot
from numpy.linalg import norm
from skyfield.api import load
kernel_name = 'de421.bsp'
p = pathlib.Path(__file__).parent.absolute()
kernel = os.path.join(*p.parts, 'data', kernel_name)
jpl = load(kernel)
T = load.timescale(builtin=True)
# https://nssdc.gsfc.nasa.gov/planetary/planetfact.html
sun = {
'name': 'Sun',
'mass': 1988500e24,
'radius': 695700.0,
'mu': 132712e6,
'eph': jpl['sun']
}
earth = {
'name': 'Earth',
'mass': 5.972e24,
'radius': 6378.0,
'mu': 0.39860e6,
'j2': -1.082635854e-3,
'atm': {
'rot_vector': array([0.0, 0.0, 72.9211e-6]),
'table': array([[63.096, 2.059e-4],
[251.189, 5.909e-11],
[1000.0, 3.561e-15]])
},
'eph': jpl['earth']
}
moon = {
'name': 'Moon',
'mass': 0.07346e24,
'radius': 1738.1,
'mu': 0.00490e6,
'eph': jpl['moon']
}
jupiter = {
'name': 'Jupiter',
'mass': 1898.19e24,
'radius': 69911,
'mu': 126.687e6,
'eph': jpl['jupiter barycenter']
}
def find_rho_z(z, center):
if not 1.0 < z < 1000.0:
return [[0.0, 0.0], [0.0, 0.0]]
zs = center['atm']['table'][:, 0]
rhos = center['atm']['table'][:, 1] * 1e8
for n in range(len(rhos) - 1):
if zs[n] < z < zs[n + 1]:
return [[rhos[n], rhos[n + 1]], [zs[n], zs[n + 1]]]
return [[0.0, 0.0], [0.0, 0.0]]
def calc_atmospheric_density(z, center):
rhos, zs = find_rho_z(z, center)
if rhos[0] == 0:
return 0
Hi = -(zs[1] - zs[0]) / log(rhos[1] / rhos[0])
return rhos[0] * exp(-(z - zs[0]) / Hi)
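# Hedged usage sketch (hypothetical helper, not in the original module):
def _demo_atmospheric_density():
    # Density at 400 km altitude above Earth, interpolated exponentially
    # between the 251.189 km and 1000 km rows of earth['atm']['table'].
    return calc_atmospheric_density(400.0, earth)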
def ecc_anomaly(e, M, eps=1e-8, max_iter=100):
u1 = M
for _ in range(max_iter):
u2 = u1 - ((u1 - e * sin(u1) - M) / (1 - e * cos(u1)))
if abs(u2 - u1) < eps:
break
u1 = u2
else:
return None
return u2
def true_anomaly(e, E):
    return 2 * arctan2(sqrt(1 + e) * sin(E / 2), sqrt(1 - e) * cos(E / 2))
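# Hedged usage sketch (hypothetical helper): solve Kepler's equation with the
# Newton iteration above, then convert eccentric anomaly to true anomaly.
def _demo_kepler_solve(e=0.1, M=1.0):
    E = ecc_anomaly(e, M)  # None if the iteration did not converge
    return None if E is None else true_anomaly(e, E)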
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetLocalization,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
from mmaction.core.evaluation.ava_utils import ava_eval
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
"""Calculate the ground truth confusion matrix."""
max_index = max(max(gt_labels), max(pred_labels))
confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
for gt, pred in zip(gt_labels, pred_labels):
confusion_mat[gt][pred] += 1
del_index = []
for i in range(max_index):
if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
del_index.append(i)
confusion_mat = np.delete(confusion_mat, del_index, axis=0)
confusion_mat = np.delete(confusion_mat, del_index, axis=1)
if normalize is not None:
        confusion_mat = np.array(confusion_mat, dtype=float)
m, n = confusion_mat.shape
if normalize == 'true':
for i in range(m):
s = np.sum(confusion_mat[i], dtype=float)
if s == 0:
continue
confusion_mat[i, :] = confusion_mat[i, :] / s
elif normalize == 'pred':
for i in range(n):
s = sum(confusion_mat[:, i])
if s == 0:
continue
confusion_mat[:, i] = confusion_mat[:, i] / s
elif normalize == 'all':
s = np.sum(confusion_mat)
if s != 0:
confusion_mat /= s
return confusion_mat
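# Hedged sketch (hypothetical helper): with normalize='true' every non-empty
# row of the reference confusion matrix sums to one.
def _demo_row_normalization():
    mat = gt_confusion_matrix([0, 0, 1], [0, 1, 1], normalize='true')
    return mat.sum(axis=1)  # expected: array([1., 1.])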
def test_activitynet_localization():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_localization'))
gt_path = osp.join(data_prefix, 'gt.json')
result_path = osp.join(data_prefix, 'result.json')
localization = ActivityNetLocalization(gt_path, result_path)
results = localization.evaluate()
mAP = np.array([
0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
0.52083333, 0.52083333, 0.52083333, 0.5
])
average_mAP = 0.6177579365079365
assert_array_almost_equal(results[0], mAP)
assert_array_almost_equal(results[1], average_mAP)
def test_ava_detection():
data_prefix = osp.normpath(
osp.join(osp.dirname(__file__), '../data/eval_detection'))
gt_path = osp.join(data_prefix, 'gt.csv')
result_path = osp.join(data_prefix, 'pred.csv')
label_map = osp.join(data_prefix, 'action_list.txt')
# eval bbox
detection = ava_eval(result_path, 'mAP', label_map, gt_path, None)
assert_array_almost_equal(detection['[email protected]'], 0.09385522)
def test_confusion_matrix():
# custom confusion_matrix
gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)]
pred_labels = np.random.randint(10, size=100, dtype=np.int64)
for normalize in [None, 'true', 'pred', 'all']:
cf_mat = confusion_matrix(pred_labels, gt_labels, normalize)
gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize)
        assert_array_equal(cf_mat, gt_cf_mat)
###############################################################################
# evolveddiskdf.py: module that builds a distribution function as a
# steady-state DF + subsequent evolution
#
# This module contains the following classes:
#
# evolveddiskdf - top-level class that represents a distribution function
###############################################################################
from __future__ import print_function
_NSIGMA= 4.
_NTS= 1000
_PROFILE= False
import sys
import math
import copy
import time as time_module
import warnings
import numpy as nu
from scipy import integrate
from galpy.util import galpyWarning
from galpy.orbit import Orbit
from galpy.potential import calcRotcurve
from galpy.df_src.df import df, _APY_LOADED
from galpy.potential_src.Potential import _check_c
from galpy.util.bovy_quadpack import dblquad
from galpy.util import bovy_plot
from galpy.util.bovy_conversion import physical_conversion, \
potential_physical_input, time_in_Gyr
if _APY_LOADED:
from astropy import units
_DEGTORAD= math.pi/180.
_RADTODEG= 180./math.pi
_NAN= nu.nan
class evolveddiskdf(df):
"""Class that represents a diskdf as initial DF + subsequent secular evolution"""
def __init__(self,initdf,pot,to=0.):
"""
NAME:
__init__
PURPOSE:
initialize
INPUT:
initdf - the df at the start of the evolution (at to) (units are transferred)
pot - potential to integrate orbits in
to= initial time (time at which initdf is evaluated; orbits are integrated from current t back to to) (can be Quantity)
OUTPUT:
instance
HISTORY:
2011-03-30 - Written - Bovy (NYU)
"""
if initdf._roSet: ro= initdf._ro
else: ro= None
if initdf._voSet: vo= initdf._vo
else: vo= None
df.__init__(self,ro=ro,vo=vo)
self._initdf= initdf
self._pot= pot
if _APY_LOADED and isinstance(to,units.Quantity):
to= to.to(units.Gyr).value/time_in_Gyr(self._vo,self._ro)
self._to= to
@physical_conversion('phasespacedensity2d',pop=True)
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the distribution function
INPUT:
Orbit instance:
a) Orbit instance alone: use initial state and t=0
b) Orbit instance + t: Orbit instance *NOT* called (i.e., Orbit's initial condition is used, call Orbit yourself), t can be Quantity
If t is a list of t, DF is returned for each t, times must be in descending order and equally spaced (does not work with marginalize...)
marginalizeVperp - marginalize over perpendicular velocity (only supported with 1a) above) + nsigma, +scipy.integrate.quad keywords
marginalizeVlos - marginalize over line-of-sight velocity (only supported with 1a) above) + nsigma, +scipy.integrate.quad keywords
log= if True, return the log (not for deriv, bc that can be negative)
integrate_method= method argument of orbit.integrate
deriv= None, 'R', or 'phi': calculates derivative of the moment wrt R or phi **not with the marginalize options**
OUTPUT:
DF(orbit,t)
HISTORY:
2011-03-30 - Written - Bovy (NYU)
2011-04-15 - Added list of times option - Bovy (NYU)
"""
integrate_method= kwargs.pop('integrate_method','dopr54_c')
# Must match Python fallback for non-C potentials here, bc odeint needs
# custom t list to avoid numerically instabilities
if '_c' in integrate_method and not _check_c(self._pot):
if ('leapfrog' in integrate_method \
or 'symplec' in integrate_method):
integrate_method= 'leapfrog'
else:
integrate_method= 'odeint'
deriv= kwargs.get('deriv',None)
if isinstance(args[0],Orbit):
if len(args) == 1:
t= 0.
else:
t= args[1]
else:
raise IOError("Input to __call__ not understood; this has to be an Orbit instance with optional time")
if isinstance(t,list):
t= nu.array(t)
tlist= True
elif isinstance(t,nu.ndarray) and \
not (hasattr(t,'isscalar') and t.isscalar):
tlist= True
else: tlist= False
if _APY_LOADED and isinstance(t,units.Quantity):
t= t.to(units.Gyr).value/time_in_Gyr(self._vo,self._ro)
if kwargs.pop('marginalizeVperp',False):
if tlist: raise IOError("Input times to __call__ is a list; this is not supported in conjunction with marginalizeVperp")
if kwargs.pop('log',False):
return nu.log(self._call_marginalizevperp(args[0],integrate_method=integrate_method,**kwargs))
else:
return self._call_marginalizevperp(args[0],integrate_method=integrate_method,**kwargs)
elif kwargs.pop('marginalizeVlos',False):
if tlist: raise IOError("Input times to __call__ is a list; this is not supported in conjunction with marginalizeVlos")
if kwargs.pop('log',False):
return nu.log(self._call_marginalizevlos(args[0],integrate_method=integrate_method,**kwargs))
else:
return self._call_marginalizevlos(args[0],integrate_method=integrate_method,**kwargs)
#Integrate back
if tlist:
if self._to == t[0]:
if kwargs.get('log',False):
return nu.log([self._initdf(args[0],use_physical=False)])
else:
return [self._initdf(args[0],use_physical=False)]
ts= self._create_ts_tlist(t,integrate_method)
o= args[0]
#integrate orbit
if _PROFILE: #pragma: no cover
start= time_module.time()
if not deriv is None:
#Also calculate the derivative of the initial df with respect to R, phi, vR, and vT, and the derivative of Ro wrt R/phi etc., to calculate the derivative; in this case we also integrate a small area of phase space
if deriv.lower() == 'r':
dderiv= 10.**-10.
tmp= o.R(use_physical=False)+dderiv
dderiv= tmp-o.R(use_physical=False)
msg= o._orb.integrate_dxdv([dderiv,0.,0.,0.],ts,self._pot,method=integrate_method)
elif deriv.lower() == 'phi':
dderiv= 10.**-10.
tmp= o.phi(use_physical=False)+dderiv
dderiv= tmp-o.phi(use_physical=False)
msg= o._orb.integrate_dxdv([0.,0.,0.,dderiv],ts,self._pot,method=integrate_method)
if msg > 0.: # pragma: no cover
print("Warning: dxdv integration inaccurate, returning zero everywhere ... result might not be correct ...")
if kwargs.get('log',False) and deriv is None: return nu.zeros(len(t))-nu.finfo(nu.dtype(nu.float64)).max
else: return nu.zeros(len(t))
o._orb.orbit= o._orb.orbit_dxdv[:,0:4]
else:
o.integrate(ts,self._pot,method=integrate_method)
if _PROFILE: #pragma: no cover
int_time= (time_module.time()-start)
#Now evaluate the DF
if _PROFILE: #pragma: no cover
start= time_module.time()
if integrate_method == 'odeint':
retval= []
os= [o(self._to+t[0]-ti,use_physical=False) for ti in t]
retval= nu.array(self._initdf(os,use_physical=False))
else:
if len(t) == 1:
orb_array= o.getOrbit().T
orb_array= orb_array[:,1]
else:
orb_array= o.getOrbit().T
retval= self._initdf(orb_array,use_physical=False)
if (isinstance(retval,float) or len(retval.shape) == 0) \
and nu.isnan(retval):
retval= 0.
elif not isinstance(retval,float) and len(retval.shape) > 0:
retval[(nu.isnan(retval))]= 0.
if len(t) > 1: retval= retval[::-1]
if _PROFILE: #pragma: no cover
df_time= (time_module.time()-start)
tot_time= int_time+df_time
print(int_time/tot_time, df_time/tot_time, tot_time)
if not deriv is None:
if integrate_method == 'odeint':
dlnfdRo= nu.array([self._initdf._dlnfdR(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dlnfdvRo= nu.array([self._initdf._dlnfdvR(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dlnfdvTo= nu.array([self._initdf._dlnfdvT(o.R(self._to+t[0]-ti,use_physical=False),
o.vR(self._to+t[0]-ti,use_physical=False),
o.vT(self._to+t[0]-ti,use_physical=False))
for ti in t])
dRo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),4] for ti in t])/dderiv
dvRo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),5] for ti in t])/dderiv
dvTo= nu.array([o._orb.orbit_dxdv[list(ts).index(self._to+t[0]-ti),6] for ti in t])/dderiv
#print(dRo, dvRo, dvTo)
dlnfderiv= dlnfdRo*dRo+dlnfdvRo*dvRo+dlnfdvTo*dvTo
retval*= dlnfderiv
else:
if len(t) == 1:
dlnfdRo= self._initdf._dlnfdR(orb_array[0],
orb_array[1],
orb_array[2])
dlnfdvRo= self._initdf._dlnfdvR(orb_array[0],
orb_array[1],
orb_array[2])
dlnfdvTo= self._initdf._dlnfdvT(orb_array[0],
orb_array[1],
orb_array[2])
else:
dlnfdRo= nu.array([self._initdf._dlnfdR(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dlnfdvRo= nu.array([self._initdf._dlnfdvR(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dlnfdvTo= nu.array([self._initdf._dlnfdvT(orb_array[0,ii],
orb_array[1,ii],
orb_array[2,ii])
for ii in range(len(t))])
dorb_array= o._orb.orbit_dxdv.T
if len(t) == 1: dorb_array= dorb_array[:,1]
dRo= dorb_array[4]/dderiv
dvRo= dorb_array[5]/dderiv
dvTo= dorb_array[6]/dderiv
#print(dRo, dvRo, dvTo)
dlnfderiv= dlnfdRo*dRo+dlnfdvRo*dvRo+dlnfdvTo*dvTo
if len(t) > 1: dlnfderiv= dlnfderiv[::-1]
retval*= dlnfderiv
else:
if self._to == t and deriv is None:
if kwargs.get('log',False):
return nu.log(self._initdf(args[0],use_physical=False))
else:
return self._initdf(args[0],use_physical=False)
elif self._to == t and not deriv is None:
if deriv.lower() == 'r':
return self._initdf(args[0])*self._initdf._dlnfdR(args[0]._orb.vxvv[0],
args[0]._orb.vxvv[1],
args[0]._orb.vxvv[2])
elif deriv.lower() == 'phi':
return 0.
if integrate_method == 'odeint':
ts= nu.linspace(t,self._to,_NTS)
else:
ts= nu.linspace(t,self._to,2)
o= args[0]
#integrate orbit
if not deriv is None:
                ts= nu.linspace(t,self._to,_NTS)
from __future__ import print_function, division, absolute_import
import collections
import re
import numpy
from . import types, config, npdatetime
from .targets import ufunc_db
version = tuple(map(int, numpy.__version__.split('.')[:2]))
int_divbyzero_returns_zero = config.PYVERSION <= (3, 0)
FROM_DTYPE = {
numpy.dtype('bool'): types.boolean,
numpy.dtype('int8'): types.int8,
numpy.dtype('int16'): types.int16,
numpy.dtype('int32'): types.int32,
numpy.dtype('int64'): types.int64,
numpy.dtype('uint8'): types.uint8,
    numpy.dtype('uint16'): types.uint16,
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Extraction of thumbs from sd09 dataset
"""
import os
import argparse
import shutil
import progressbar
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.signal
import scipy.ndimage
import scipy.spatial
import scipy.misc
from scipy.ndimage.filters import gaussian_gradient_magnitude
from peakdetect import peakdet
import math
from multiprocessing import Pool
import psutil
import h5py
from progressbar import widgets
def scan_dir(path, ending):
"""Recursively scan the folder"""
file_list = []
dir_list = []
for curr_dir, _, local_files in os.walk(path):
# filter local files
local_files = [os.path.join(curr_dir, x) for x in local_files if x.endswith(ending)]
# append to global list
file_list += local_files
if local_files:
dir_list.append(curr_dir)
return dir_list, file_list
def gaussWin2D(shape, sigma=None):
"""
Create a 2D Gaussian window
The shape must have 2 components, namely the vertical and horizontal,
in this order.
"""
# Check input
    if len(shape) == 1:
        shape = (shape[0], shape[0])
    elif len(shape) > 2:
        shape = shape[:2]
    shape = [max([1, x]) for x in shape]
    if not sigma:
        sigma = [x/2.0 for x in shape]
    else:
        if len(sigma) == 1:
            sigma = (sigma[0], sigma[0])
        elif len(sigma) > 2:
            sigma = sigma[:2]
        # Replace non-positive sigmas with the smallest positive float
        sigma = [x if x > 0 else np.finfo(np.float32).eps for x in sigma]
# Create vertical and horizontal components
v = scipy.signal.gaussian(shape[0], sigma[0])
v = np.reshape(v, (-1, 1)) # column
h = scipy.signal.gaussian(shape[1], sigma[1])
h = np.reshape(h, (1, -1)) # row
return np.dot(v, h)
def hannWin2D(shape, sigma=None):
"""
Create a 2D Gaussian window
The shape must have 2 components, namely the vertical and horizontal,
in this order.
"""
# Check input
if len(shape) == 1:
shape = (shape, shape)
elif len(shape) > 2:
shape = shape[:1]
shape = [max([1, x]) for x in shape]
if not sigma:
sigma = [x/2.0 for x in shape]
else:
if len(sigma) == 1:
sigma = (sigma, sigma)
elif len(shape) > 2:
sigma = sigma[:1]
sigma = [np.finfo(float32).eps for x in sigma if x <= 0]
# Create vertical and horizontal components
v = scipy.signal.hann(shape[0], sym=True)
v = np.reshape(v, (-1, 1)) # column
h = scipy.signal.hann(shape[1], sym=True)
h = np.reshape(h, (1, -1)) # row
return np.dot(v, h)
def circleWin(rad):
rad_span = np.arange(-rad, rad+1)
x, y = np.meshgrid(rad_span, rad_span)
kernel = x**2 + y**2 <= rad**2
return kernel.astype(int)
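# Hedged sketch (hypothetical helper): the circular kernel's area approaches
# pi*r^2 for large radii, which is why it can serve as a normalized
# local-mean filter elsewhere in this module.
def _demo_circle_win(rad=10):
    kernel = circleWin(rad)
    return kernel.sum() / (math.pi * rad ** 2)  # close to 1.0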
def cropToMaxEnclosedSquare(image):
s = min(image.shape)
return image[0:s, 0:s]
def cart2pol(_mat, N):
"""
Transform an XY matrix in a rho-theta matrix.
N is the number of angles used in the process.
"""
dx = 1.0 # N dx = pi r
output_theta = np.linspace(0, np.pi, N)
    # Force mat to be a square (and bind `mat` in the already-square case,
    # which the original code missed, causing a NameError)
    if _mat.shape[0] != _mat.shape[1]:
        mat = cropToMaxEnclosedSquare(_mat)
    else:
        mat = _mat
# Get matrix information
mat_size = mat.shape[0]
mat_center = mat_size/2.0+0.5
# Create interpolator
xy_span = np.arange(0, mat_size)
interpolator = scipy.interpolate.RectBivariateSpline(xy_span, xy_span, mat)
# Create array of radii
rad_span = np.arange(0, mat_center)
# Initialize the output matrix
output = np.zeros([len(rad_span), len(output_theta)])
# For each radius create the array of angles, then interpolate over those points,
# and resample to get N points
for rad in rad_span:
if rad == 0:
output[int(rad), :] = np.ones_like(output_theta) * interpolator.ev(mat_center, mat_center)
else:
NP = int(np.pi*rad/dx)
theta = np.linspace(0, np.pi, NP)
xx = rad * np.cos(theta) + mat_center
yy = rad * np.sin(theta) + mat_center
circle = interpolator.ev(xx, yy)
if NP > 2*N:
circle = scipy.ndimage.filters.uniform_filter1d(circle, int(NP/N))
circle_interp = scipy.interpolate.interp1d(theta, circle)
output[int(rad), :] = circle_interp(output_theta)
# theta_span = np.linspace(0, np.pi, N)
# theta, rad = np.meshgrid(theta_span, rad_span)
# xx = rad * np.cos(theta) + mat_center
# yy = rad * np.sin(theta) + mat_center
# return interpolator.ev(xx, yy)
return output
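# Hedged sketch (hypothetical helper): applied to a centred, radially
# symmetric blob, cart2pol should return rows that are nearly constant
# along the angle axis.
def _demo_cart2pol():
    span = np.linspace(-1.0, 1.0, 65)
    xx, yy = np.meshgrid(span, span)
    blob = np.exp(-(xx ** 2 + yy ** 2) / 0.1)
    polar = cart2pol(blob, 32)
    return polar.std(axis=1)  # near zero at every radius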
def computeRidgeFrequency(image, allowed_range=None):
length = np.min(image.shape)
# Compute the FFT of the image using a gaussian window
kernel = hannWin2D(image.shape)
img = image * kernel
img = (img-img.mean()) / img.std()
img = np.fft.fftshift(np.absolute(np.fft.fft2(img)))
# Convert the image to polar representation
img = cart2pol(img, 32)
# Sum (L2-norm) along the angle axis, to get the energy for each circle
circle_energy = np.sqrt(np.sum(img**2, axis=1))
# Suppress low order terms
    if allowed_range is not None:
radii = np.arange(0, len(circle_energy))
radii_upper = radii < length/allowed_range[0]
radii_lower = radii > length/allowed_range[1]
radii_nallowed = np.logical_not(np.logical_and(radii_upper, radii_lower))
radii = radii[radii_nallowed]
circle_energy[radii] = circle_energy.min()
# Find the most energetic circle
circle_logen = np.log(circle_energy)
circle_logen = scipy.ndimage.filters.gaussian_filter(circle_logen, sigma=2, mode="nearest")
peakind, _ = peakdet(circle_logen, circle_logen.std()/2)
if len(peakind)==0:
return 10
else:
max_peak = np.argmax(peakind[:,1])
if peakind[max_peak, 0]==0:
return 10
else:
return length/peakind[max_peak, 0]
def segmentation(image, _f, p):
"""
Segment input image.
Args:
- image: the image to be segmented
- _f: frequency of the ridges in the given image
- p: percentile for threshold
"""
# Compute image edges
s = scipy.ndimage.gaussian_gradient_magnitude(image, sigma=_f//6, mode='nearest')
# Normalize image globally
s = (s-s.min())/(s.max()-s.min())
# Saturate values far from the threshold
rad = min([p, 1-p])/2
saturate_fn = lambda X: (np.tanh(2*(X-p)/rad)+1)/2
# Saturate the image and smooth the image
s = scipy.ndimage.filters.gaussian_filter(saturate_fn(s), _f//2, mode='nearest')
# Segmentation
foreground = (s >= 0.5).astype(int)
# # Take r as half the frequency
# f = math.ceil(_f/2)
# # Generate a circle window as neighborhood
# kernel = circleWin(f)
# kernel = kernel / kernel.sum()
# # Compute the mean in each neighborhood
# m = scipy.signal.convolve2d(image, kernel, mode='same', boundary='symm')
# # Compute the standard deviation in each neighborhood
# s = np.sqrt(scipy.signal.convolve2d((image-m)**2, kernel, mode='same', boundary='symm'))
# # Smooth the values
# s = scipy.ndimage.filters.gaussian_filter(s, 8*f, mode='nearest')
# # Compute the threshold for s as the p-percentile
# t = np.percentile(s, p)
# # Segmentation
# foreground = (s >= t).astype(int)
if np.count_nonzero(foreground) > 0:
# Compute the connected components of the foreground and select the largest
label_im, n_labels = scipy.ndimage.label(foreground)
all_labels = np.arange(1, n_labels+1)
label_area = scipy.ndimage.labeled_comprehension(foreground, label_im, all_labels, np.sum, int, 0)
largest_idx = all_labels[np.argmax(label_area)]
foreground = (label_im == largest_idx).astype(int)
# Hole fill
foreground = scipy.ndimage.morphology.binary_fill_holes(foreground)
return foreground
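# Sketch of the intended pipeline (parameter values are illustrative):
#   freq = computeRidgeFrequency(image, allowed_range=(3, 25))
#   mask = segmentation(image, freq, p=0.5)   # p: saturation threshold in [0, 1]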
def find_roi_pos(mask, roi_shape=(128,128), step=None):
# Eventually reduce mask to gridded points
if step:
_mask = np.zeros_like(mask)
        _mask[::step, ::step] = np.logical_or(_mask[::step, ::step], mask[::step, ::step])
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio import DistanceMatrix
from skbio.diversity.beta import pw_distances, pw_distances_from_table
class HelperBiomTable(object):
"""An object that looks like a BIOM table, for use in testing
This allows us to test passing BIOM-like objects, without having to
depend on the biom-format project (since this would ultimately be a
circular dependency).
"""
def __init__(self, data, observation_ids, sample_ids):
self._data = data.T
self.observation_ids = observation_ids
self.sample_ids = sample_ids
def ids(self, axis):
return self.sample_ids
def data(self, sample_id):
i = self.sample_ids.index(sample_id)
return self._data[i]
class BaseTests(TestCase):
def setUp(self):
self.t1 = [[1, 5],
[2, 3],
[0, 1]]
self.ids1 = list('ABC')
self.t2 = [[23, 64, 14, 0, 0, 3, 1],
[0, 3, 35, 42, 0, 12, 1],
[0, 5, 5, 0, 40, 40, 0],
[44, 35, 9, 0, 1, 0, 0],
[0, 2, 8, 0, 35, 45, 1],
[0, 0, 25, 35, 0, 19, 0]]
self.ids2 = list('ABCDEF')
# In the future, if necessary, it should be possible to just replace
# HelperBiomTable with Table in the following lines to test with the
# biom.table.Table object directly (i.e., this constructor
# interface aligns with the biom.table.Table constructor
# interface).
self.table1 = HelperBiomTable(
np.array(self.t1).T, observation_ids=range(2),
sample_ids=self.ids1)
self.table2 = HelperBiomTable(
np.array(self.t2).T, observation_ids=range(7),
sample_ids=self.ids2)
def test_pw_distances_invalid_input(self):
# number of ids doesn't match the number of samples
self.assertRaises(ValueError, pw_distances, self.t1, list('AB'),
'euclidean')
def test_pw_distances_euclidean(self):
actual_dm = pw_distances(self.t1, self.ids1, 'euclidean')
self.assertEqual(actual_dm.shape, (3, 3))
npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
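        # The expected value follows from the raw counts in self.t1:
        # d(A, B) = sqrt((1 - 2)**2 + (5 - 3)**2) = sqrt(5) ~= 2.23606798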
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from scipy.integrate import ode
g = 9.8 # gravity
force_mag = 10.0
tau = 0.02 # seconds between state updates
# cart
m_cart = 1
# pole 1
l_1 = 1 # length
m_1 = 0.1 # mass
# pole 2
l_2 = 1 # length
m_2 = 0.1 # mass
def f(time, state, input):
x = state[0]
x_dot = state[1]
theta_1 = state[2]
theta_1_dot = state[3]
theta_2 = state[4]
theta_2_dot = state[5]
x_dot_dot = ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_2) + m_2 * np.cos(theta_2)
- m_1 * np.cos(theta_1 - theta_2) * np.cos(theta_1)
                                                        - m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1)
from io import BytesIO
from imgutils import pngify
from matplotlib.colors import hsv_to_rgb, LinearSegmentedColormap
import matplotlib.pyplot as plt
from random import random
import io
import copy
import json
import matplotlib
import numpy as np
import os
import random
import tarfile
import tempfile
import boto3
import sys
from werkzeug.utils import secure_filename
from skimage import filters
import skimage.morphology
from skimage.morphology import watershed, dilation, disk
from skimage.morphology import flood_fill, flood
from skimage.draw import circle
from skimage.measure import regionprops
from skimage.exposure import rescale_intensity
from config import S3_KEY, S3_SECRET
# Connect to the s3 service
s3 = boto3.client(
"s3",
aws_access_key_id=S3_KEY,
aws_secret_access_key=S3_SECRET
)
class ZStackReview:
def __init__(self, filename, input_bucket, output_bucket, subfolders):
self.filename = filename
self.input_bucket = input_bucket
self.output_bucket = output_bucket
self.subfolders = subfolders
self.trial = self.load(filename)
self.raw = self.trial["raw"]
self.annotated = self.trial["annotated"]
self.feature = 0
self.feature_max = self.annotated.shape[-1]
self.channel = 0
self.max_frames, self.height, self.width, self.channel_max = self.raw.shape
self.dimensions = (self.width, self.height)
#create a dictionary that has frame information about each cell
#analogous to .trk lineage but do not need relationships between cells included
self.cell_ids = {}
self.num_cells = {}
self.cell_info = {}
self.current_frame = 0
for feature in range(self.feature_max):
self.create_cell_info(feature)
self.draw_raw = False
self.max_intensity = {}
for channel in range(self.channel_max):
self.max_intensity[channel] = None
self.dtype_raw = self.raw.dtype
self.scale_factor = 2
self.save_version = 0
self.color_map = plt.get_cmap('viridis')
self.color_map.set_bad('black')
self.frames_changed = False
self.info_changed = False
@property
def readable_tracks(self):
"""
Preprocesses tracks for presentation on browser. For example,
simplifying track['frames'] into something like [0-29] instead of
[0,1,2,3,...].
"""
cell_info = copy.deepcopy(self.cell_info)
for _, feature in cell_info.items():
for _, label in feature.items():
slices = list(map(list, consecutive(label['frames'])))
slices = '[' + ', '.join(["{}".format(a[0])
if len(a) == 1 else "{}-{}".format(a[0], a[-1])
for a in slices]) + ']'
label["slices"] = str(slices)
return cell_info
def get_frame(self, frame, raw):
if raw:
frame = self.raw[frame][:,:, self.channel]
return pngify(imgarr=frame,
vmin=0,
vmax=self.max_intensity[self.channel],
cmap="cubehelix")
else:
frame = self.annotated[frame][:,:, self.feature]
frame = np.ma.masked_equal(frame, 0)
return pngify(imgarr=frame,
vmin=0,
                          vmax=np.max(self.cell_ids[self.feature]),
                          cmap=self.color_map)
# ******************************************************************************
# Copyright 2017-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node, run_op_numeric_data
from tests import xfail_issue_40957
def test_concat():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
axis = 0
expected = np.concatenate((a, b), axis=0)
runtime = get_runtime()
parameter_a = ng.parameter(list(a.shape), name="A", dtype=np.float32)
parameter_b = ng.parameter(list(b.shape), name="B", dtype=np.float32)
node = ng.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b)
result = computation(a, b)
assert np.allclose(result, expected)
@xfail_issue_40957
@pytest.mark.parametrize(
"val_type, value", [(bool, False), (bool, np.empty((2, 2), dtype=bool))]
)
def test_constant_from_bool(val_type, value):
expected = np.array(value, dtype=val_type)
result = run_op_numeric_data(value, ng.constant, val_type)
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"val_type, value",
[
pytest.param(np.float32, np.float32(0.1234), marks=xfail_issue_40957),
        pytest.param(np.float64, np.float64(0.1234)),
import torch
import bionetwork
import matplotlib.pyplot as plt
import numpy
import plotting
import time
networkSize = 50
batchsize = 5
activationFunction = 'MML'
networkList, nodeNames = bionetwork.getRandomNet(networkSize, 0.1)
MOA = numpy.full(networkList.shape, False, dtype=bool)
input = torch.randn(batchsize, len(nodeNames), dtype=torch.double, requires_grad=True)
parameters = bionetwork.trainingParameters(iterations=150, clipping=1)
net1 = bionetwork.bionetworkAutoGrad(networkList, len(nodeNames))
net2 = bionetwork.bionet(networkList, len(nodeNames), MOA, parameters, activationFunction, torch.double)
net2.weights.data = net1.A.values.data
net2.bias.data = net1.bias.data
#test = torch.autograd.gradcheck(net1, input, eps=1e-4, atol=1e-6)
#test = torch.autograd.gradcheck(net2, input, eps=1e-6, atol=1e-6)
networkSize = 100
batchsize = 5
networkList, nodeNames = bionetwork.getRandomNet(networkSize, 0.5)
MOA = numpy.full(networkList.shape, False, dtype=bool)
import numpy as np
import matplotlib.pyplot as plt
DEBUG = False
def sigmoid(Xm):
Xmcp = np.copy(Xm)
for x in np.nditer(Xmcp, op_flags=['readwrite']):
x[...] = 1/(1 + np.exp(-x))
return Xmcp
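# Note: the element-wise nditer loop above matches the vectorized spelling
# below, which is the idiomatic NumPy form (illustrative alternative):
#   def sigmoid_vec(Xm):
#       return 1.0 / (1.0 + np.exp(-np.asarray(Xm, dtype=float)))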
# Inputs:
# W1m = [[w10,w11,w12]
# [w20,w21,w22]]
def y_fw_propagate(W1m,W2m,Xm):
Bm = np.matmul(W1m,Xm.transpose())
Zm = sigmoid(Bm)
#Zpm = (np.concatenate((np.array([[1,1,1,1]]),Zm),axis=0))
Zpm = (np.concatenate((np.ones((1,Xm.shape[0])),Zm),axis=0))
if(DEBUG):
print("Zpm:")
print(Zpm)
A1m = np.matmul(W2m,Zpm)
return sigmoid(A1m),Zm
def stepGradient(Xm,Y1m,Tm,Zm,W1m,W2m,gamma):
#compute W^(2) gradient
#W10,W11,W12
#remove W10
#W2pm = [[W11,W12]]
W2pm = np.delete(W2m,0).reshape(1,2)
Zpm = (np.concatenate((np.array([[1,1,1,1]]),Zm),axis=0))
DT2m = (Y1m - Tm)*Y1m*(1 - Y1m)
gradient_W2m = DT2m*Zpm
if(DEBUG):
print("gradient_W2m")
print(gradient_W2m)
gradient_W2m = gradient_W2m.sum(axis=1) #returns row vector
#compute W^(1) gradient
# Wji, ji = 10,20,11,21,12,22
# 4 columns for n = 1,2,3,4
# 2 rows for W(j=1), W(j=2)
    gradient_W1i0m = DT2m*W2pm.transpose()*Zm*(1-Zm)*Xm[:,[0]].transpose() #W(j=1,2)(i=0), n=1..4
    gradient_W1i1m = DT2m*W2pm.transpose()*Zm*(1-Zm)*Xm[:,[1]].transpose() #W(j=1,2)(i=1), n=1..4
    gradient_W1i2m = DT2m*W2pm.transpose()*Zm*(1-Zm)*Xm[:,[2]].transpose() #W(j=1,2)(i=2), n=1..4
gradient_W1m = np.concatenate((gradient_W1i0m,gradient_W1i1m),axis=0)
gradient_W1m = np.concatenate((gradient_W1m,gradient_W1i2m),axis=0)
if(DEBUG):
print("gradient_W1m")
print(gradient_W2m)
gradient_W1m = gradient_W1m.sum(axis=1) # sum and return Wji only
#At this point, returns gradient_W1m row vector 10,20,11,21,12,22
gradient_W1m = gradient_W1m.reshape(3,2).transpose()
W1m_next = W1m - gamma*gradient_W1m
W2m_next = W2m - gamma*gradient_W2m
return W1m_next,W2m_next
def main():
Xm = [
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]
]
Xm = np.array(Xm)
Tm = [[0,1,1,0]]
    Tm = np.array(Tm)
""" Copyright 2016-2022 by Sophgo Technologies Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Preprocessor and postprocessor of mtcnn
"""
import cv2
import json
import numpy as np
class PreProcessor():
def __init__(self, mean, scale_factor, face_factor=0.7, min_size=40):
""" Constructor.
Args:
mean: List of mean value of each channel
scale_factor: Scale value to preprocess input image
face_factor: Initial value to generate image pyramid scale factors
min_size: Minmum size of detection
"""
self.mean = mean
self.scale_factor = scale_factor
self.face_factor = face_factor
self.min_size = min_size
def generate_scales(self, height, width):
""" Generate image pyramid scale factors.
Args:
height: Image height
width: Image width
Returns:
A list of scale factors
"""
min_hw = min(height, width)
m_scale = 12.0 / self.min_size
min_hw = int(min_hw * m_scale)
scales = []
factor_count = 0
while min_hw >= 50:
scales.append(m_scale * pow(self.face_factor, factor_count))
min_hw = int(min_hw * self.face_factor)
factor_count += 1
return scales
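    # Worked example (numbers are illustrative): for a 480x640 image with
    # min_size=40 and face_factor=0.7, m_scale = 12/40 = 0.3 and
    # min_hw = int(480 * 0.3) = 144, so the loop yields
    # scales = [0.3, 0.21, 0.147] before min_hw drops below 50.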
def pnet_process(self, image, height, width):
""" Preprocess function of PNet.
Args:
image: Input image
height: Expected image height
width: Expected image width
Returns:
4-dim ndarray
"""
image = cv2.resize(image, (width, height)).astype(np.float32)
image[:, :, 0] -= self.mean[0]
image[:, :, 1] -= self.mean[1]
image[:, :, 2] -= self.mean[2]
image *= self.scale_factor
image = np.transpose(image, (2, 0, 1))
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
return image.copy()
def rnet_process(self, image, boxes, height, width):
""" Preprocess function of RNet
Args:
image: Input image
boxes: Detected boxes by PNet
height: Expected image height
width: Expected image width
Returns:
4-dim ndarray
"""
data = self.__padding(image, boxes, height, width)
return data
def onet_process(self, image, boxes, height, width):
""" Preprocess function of ONet
Args:
image: Input image
boxes: Detected boxes by RNet
height: Expected image height
width: Expected image width
Returns:
4-dim ndarray
"""
data = self.__padding(image, boxes, height, width)
return data
def __padding(self, image, boxes, height, width):
""" Padding function for bounding boxes.
Args:
image: Input image
boxes: Detected bounding boxes
height: Expected image height
width: Expected image width
Returns:
4-dim ndarray
"""
temp = boxes[:, :4].astype(np.int)
y1 = np.where(temp[:, 0] < 0)[0]
if len(y1) > 0:
temp[y1, 0] = 0
        x1 = np.where(temp[:, 1] < 0)[0]
        if len(x1) > 0:
            temp[x1, 1] = 0
        y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]
        if len(y2) > 0:
            temp[y2, 2] = image.shape[0] - 1
        x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]
        if len(x2) > 0:
            temp[x2, 3] = image.shape[1] - 1
pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)
pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)
pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)
        pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
import felis
from astropy.io import fits
import scipy.signal
import pdb
import matplotlib.pyplot as plt
import datetime
import pickle
import sys
import os
import numpy as np
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def match_templates2specs(templates,spectra,speczs,picklename,wavewindow=[50.0],wavecen_restframe=[1908.0],
vshift=None,min_template_level=1e-4,plotdir=None,plot_allCCresults=False,
subtract_spec_median=True,overwrite=False,verbose=True):
"""
    Wrapper around felis cross-correlation template matching, to match a list of spectra with a list of templates.
--- INPUT ---
spectra fits spectra to find a (cross-correlation) match to template for
templates fits templates to correlate with
speczs Spectroscopic redshifts to perform cross-correlation in rest-frame (shifting the spectrum).
subtract_spec_median Subtract median value of spectrum (approximating the continuum level)
picklename Name of pickle file to store final cross-correlation results in
wavewindow Window (wavecen_restframe * (1+speczs) +/- wavewindow) to perform template matching over.
wavecen_restframe Central rest-frame wavelength of the region to match
    vshift                  If a velocity shift is known, provide it here and it will be stored in output (not used)
min_template_level The template is interpolated to the wavelength grid of the spectrum and extrapolated
beyond it's edges if nescessary. In this extrapolation (assuming the template goes to ~0
at the edges), very small values (e.g., <1e-20) can be returned. To set these to 0.0
provide a level below which all values in the interpolated template are treated as 0s.
plotdir Directory to store plots to
plot_allCCresults To plot all the cross-correlation plots, set this to True
overwrite Overwrite existing pickle file if it already exists?
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import felis
import glob
specdir = '/Users/kschmidt/work/MUSE/uvEmissionlineSearch/felis_testing/'
#specs = glob.glob(specdir+'uves_felis_mock_MUSEspectrum_noisesigma*3p0.fits')
specs = glob.glob(specdir+'uves_felis_mock_MUSEspectrum_noisesigma*.fits')
speczs = [3.5]*len(specs)
tempdir = '/Users/kschmidt/work/MUSE/uvEmissionlineSearch/felis_testing/'
#temps = glob.glob(specdir+'uves_felis_template_CIIIdoublet_sig_0p25_fluxCIII1_4p0_flux*.fits')
temps = glob.glob(specdir+'uves_felis_template_CIIIdoublet_*fits')
temps = glob.glob(specdir+'uves_felis_template_CIVdoublet_*fits')
plotdir = '/Users/kschmidt/work/MUSE/uvEmissionlineSearch/felis_testing/plots_CCresults180615/'
pickle = '/Users/kschmidt/work/MUSE/uvEmissionlineSearch/felis_testing/CCresults180615_RENAME_.pkl'
specs = ['/Volumes/DATABCKUP1/TDOSEextractions/171201_TDOSEextraction/Modelimg/tdose_spectra/tdose_spectrum_candels-cdfs-15_modelimg_0115003085-0115003085.fits']
speczs = [3.2585198879241943]
ccdic = felis.match_templates2specs(temps,specs,speczs,pickle,wavewindow=[60]*len(specs),plotdir=plotdir,wavecen_restframe=[1549.0]*len(specs))
--- OUTPUT ---
This wrapper will collect all the cross-correlation results in a main dictionary.
The dictionary will be returned directly but also saved to disk as a pickled filed.
This file can be loaded with: felis.load_picklefile(picklefilename)
The returned dictionary has the following format:
dictionary.keys() = the list of spectra that have been crossmatched (input = 'spectra')
Each entry in the dictionary (dictionary[key]) contains the following entries:
'wavelengths' : The wavelength vector used for the cross-correlation matching of
each of the N templates
'templatevec' : list of each of the N templates matched to spectrum
'zspec' : Spectroscopic redshift for spectrum
'zCCmaxvec' : the redshift corresponding to max S/N for each of the N templates matched
        'ccresultsarray_flux'       : flux vectors for each of the N templates matched
        'ccresultsarray_variance'   : Variance vectors for each of the N templates matched
'ccresultsarr_S2N' : S/N vector for each of the N templates matched
'ccresultsarr_chi2' : chi squared values for the cross-correlation of each of the N templates matched
'ccresultsarr_Ngoodent' : The number of good pixels used in the cross correlation for each of
the N templates matched
'S2NCCmaxvec' : vector with max(S/N) values for N templates matched
'continuumlevel' : The 'continuum level' of the spectrum removed in the cross-correlation.
Currently the value is simply the median of the spectrum.
'vshift' : If a velocity shift was provided for the template match this is stored here
The picklefile can be used to assemble sub-sample results (e.g., S/N cuts) based on the
template cross-correlations with
felis.selection_from_picklefile()
And individual entries can be plotted using
felis.plot_picklefilecontent()
"""
ccresultdic = {}
if verbose: print(' - Starting cross-correlation of the '+str(len(spectra))+' spectra and '+
str(len(templates))+' templates')
startstring = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
if verbose: print(' '+startstring+'\n')
if len(spectra) == 0:
sys.exit(' No spectra provided')
if len(templates) == 0:
sys.exit(' No templates provided')
for ss, spec in enumerate(spectra):
# Nwave = pyfits.open(spec)[1].header['NAXIS2']
spec_namebase = spec.split('/')[-1].split('.fit')[0]
for tt, temp in enumerate(templates):
temp_namebase = temp.split('/')[-1].split('.fit')[0]
wavecenter = wavecen_restframe[ss] * (1.0 + speczs[ss])
waverange = [wavecenter-wavewindow[ss],wavecenter+wavewindow[ss]]
wave, ccresults, max_S2N, max_z, continuumlevel = \
felis.cross_correlate_template(spec,temp,z_restframe=speczs[ss],spec_median_sub=subtract_spec_median,
waverange=waverange,min_template_level=min_template_level,verbose=verbose)
if tt == 0:
ccresultsarr_flux = np.array(ccresults[:,0])
ccresultsarr_variance = ccresults[:,1]
ccresultsarr_S2N = ccresults[:,2]
ccresultsarr_chi2 = ccresults[:,3]
ccresultsarr_Ngoodent = ccresults[:,4]
templatevec = np.array([temp])
S2NCCmaxvec = np.array([max_S2N])
                zCCmaxvec = np.array([max_z])
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ...utils import perf_dict
from .utils import EBMUtils
from .internal import NativeEBM
from ...utils import unify_data, autogen_schema
from ...api.base import ExplainerMixin
from ...api.templates import FeatureValueExplanation
from ...utils import JobLibProvider
from ...utils import gen_name_from_class, gen_global_selector, gen_local_selector
from ...visual.plot import plot_continuous_bar, plot_horizontal_bar, sort_take
import numpy as np
from sklearn.base import is_classifier, clone
from sklearn.utils.validation import check_is_fitted
from sklearn.metrics import roc_auc_score, mean_squared_error
from collections import Counter
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, RegressorMixin
from sklearn.model_selection import train_test_split
from contextlib import closing
from itertools import combinations
import logging
log = logging.getLogger(__name__)
class EBMExplanation(FeatureValueExplanation):
""" Visualizes specifically for EBM.
"""
explanation_type = None
def __init__(self, explanation_type, internal_obj,
feature_names=None, feature_types=None,
name=None, selector=None):
super(EBMExplanation, self).__init__(
explanation_type, internal_obj,
feature_names=feature_names,
feature_types=feature_types,
name=name,
selector=selector
)
def visualize(self, key=None):
data_dict = self.data(key)
if data_dict is None:
return None
if self.explanation_type == 'global' and key is None:
data_dict = sort_take(
data_dict, sort_fn=lambda x: -abs(x), top_n=15,
reverse_results=True,
)
figure = plot_horizontal_bar(
data_dict, title='Overall Importance:<br>Mean Absolute Score',
start_zero=True,
)
return figure
if self.explanation_type == 'global' and self.feature_types[key] == 'continuous':
title = self.feature_names[key]
figure = plot_continuous_bar(data_dict, title=title)
return figure
return super().visualize(key)
# TODO: More documentation in binning process to be explicit.
# TODO: Consider stripping this down to the bare minimum.
class EBMPreprocessor(BaseEstimator, TransformerMixin):
""" Transformer that preprocesses data to be ready before EBM. """
def __init__(self, schema=None, cont_n_bins=255,
missing_constant=0, unknown_constant=0, feature_names=None):
""" Initializes EBM preprocessor.
Args:
schema: A dictionary that encapsulates column information,
such as type and domain.
cont_n_bins: Max number of bins to process numeric features.
missing_constant: Missing encoded as this constant.
unknown_constant: Unknown encoded as this constant.
feature_names: Feature names as list.
"""
self.schema = schema
self.cont_n_bins = cont_n_bins
self.missing_constant = missing_constant
self.unknown_constant = unknown_constant
self.feature_names = feature_names
def fit(self, X):
""" Fits transformer to provided instances.
Args:
X: Numpy array for training instances.
Returns:
Itself.
"""
# self.col_bin_counts_ = {}
self.col_bin_edges_ = {}
self.hist_counts_ = {}
self.hist_edges_ = {}
self.col_mapping_ = {}
self.col_mapping_counts_ = {}
self.col_n_bins_ = {}
self.col_names_ = []
self.col_types_ = []
self.has_fitted_ = False
        # TODO: Remove this.
        self.schema_ = self.schema if self.schema is not None else autogen_schema(
            X, feature_names=self.feature_names
        )
schema = self.schema_
for col_idx in range(X.shape[1]):
col_name = list(schema.keys())[col_idx]
self.col_names_.append(col_name)
col_info = schema[col_name]
assert (col_info['column_number'] == col_idx)
col_data = X[:, col_idx]
self.col_types_.append(col_info['type'])
if col_info['type'] == 'continuous':
col_data = col_data.astype(float)
                uniq_vals = set(col_data[~np.isnan(col_data)])
import os
import pickle, glob, shutil
import numpy as np
import pandas as pd
from Fuzzy_clustering.ver_tf2.utils_for_forecast import split_continuous
from Fuzzy_clustering.ver_tf2.RBFNN_module import rbf_model
from Fuzzy_clustering.ver_tf2.RBF_ols import rbf_ols_module
from Fuzzy_clustering.ver_tf2.CNN_module import cnn_model
from Fuzzy_clustering.ver_tf2.CNN_module_3d import cnn_3d_model
from Fuzzy_clustering.ver_tf2.LSTM_module_3d import lstm_3d_model
from Fuzzy_clustering.ver_tf2.Combine_module_train import combine_model
from Fuzzy_clustering.ver_tf2.Clusterer import clusterer
from Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from datetime import datetime
from Fuzzy_clustering.ver_tf2.imblearn.over_sampling import BorderlineSMOTE, SVMSMOTE, SMOTE,ADASYN
import time, logging, warnings, joblib
class global_train(object):
def __init__(self, static_data, x_scaler):
self.istrained = False
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
try:
self.load(self.cluster_dir)
except:
pass
self.static_data=static_data
self.model_type=static_data['type']
self.x_scaler = x_scaler
self.methods=static_data['project_methods']
self.combine_methods=static_data['combine_methods']
self.rated=static_data['rated']
self.n_jobs=static_data['njobs']
self.var_lin = static_data['clustering']['var_lin']
self.cluster_dir=os.path.join(static_data['path_model'], 'Global_regressor')
self.data_dir = os.path.join(self.cluster_dir, 'data')
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
logger = logging.getLogger('Glob_train_procedure' + '_' +self.model_type)
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.cluster_dir, 'log_train_procedure.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger = logger
def move_files(self, path1, path2):
for filename in glob.glob(os.path.join(path1, '*.*')):
shutil.copy(filename, path2)
def split_dataset(self, X, y, act, X_cnn=np.array([]), X_lstm=np.array([])):
if len(y.shape)>1:
y=y.ravel()
if len(act.shape)>1:
act=act.ravel()
self.N_tot, self.D = X.shape
X_train, X_test1, y_train, y_test1, mask_test1 = split_continuous(X, y, test_size=0.15, random_state=42, mask=False)
cvs = []
for _ in range(3):
X_train1 = np.copy(X_train)
y_train1 = np.copy(y_train)
X_train1, X_val, y_train1, y_val = train_test_split(X_train1, y_train1, test_size=0.15)
cvs.append([X_train1, y_train1, X_val, y_val, X_test1, y_test1])
self.N_train = cvs[0][0].shape[0]
self.N_val = cvs[0][2].shape[0] + cvs[0][4].shape[0]
return cvs, mask_test1, X, y, act, X_cnn, X_lstm
def find_features(self, cvs, method, njobs):
if method=='boruta':
from Fuzzy_clustering.ver_tf2.Feature_selection_boruta import FS
else:
from Fuzzy_clustering.ver_tf2.Feature_selection_permutation import FS
fs=FS(self.cluster_dir, 2*njobs)
self.features=fs.fit(cvs)
self.save(self.cluster_dir)
def split_test_data(self, X, y, act, X_cnn=np.array([]), X_lstm=np.array([]), test_indices =None):
self.N_tot, self.D = X.shape
if not test_indices is None:
X_test = X.loc[test_indices['dates_test']]
y_test = y.loc[test_indices['dates_test']]
act_test = act.loc[test_indices['dates_test']]
X = X.loc[test_indices['dates_train']]
y = y.loc[test_indices['dates_train']]
act = act.loc[test_indices['dates_train']]
if len(X_cnn.shape) > 1:
X_cnn_test = X_cnn[test_indices['indices_test']]
X_cnn = X_cnn[test_indices['indices_train']]
else:
X_cnn_test = np.array([])
if len(X_lstm.shape) > 1:
X_lstm_test = X_lstm[test_indices['indices_test']]
X_lstm = X_lstm[test_indices['indices_train']]
else:
X_lstm_test = np.array([])
else:
X_test = pd.DataFrame([])
y_test = pd.DataFrame([])
act_test = pd.DataFrame([])
X_cnn_test = np.array([])
X_lstm_test = np.array([])
self.N_test = X_test.shape[0]
return X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test
def load_data(self):
data_path = self.data_dir
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
act = pd.read_csv(os.path.join(data_path, 'dataset_act.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
if X_cnn.shape[1]==6:
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
if os.path.exists(os.path.join(self.data_dir, 'test_indices.pickle')):
test_indices = joblib.load(os.path.join(self.data_dir, 'test_indices.pickle'))
else:
test_indices = None
return X, y, act, X_cnn, X_lstm, test_indices
def fit(self):
self.logger.info('Start training Global models')
self.logger.info('/n')
X, y, act, X_cnn, X_lstm, test_indices = self.load_data()
self.variables = X.columns
indices = X.index
X, y, act, X_cnn, X_lstm, X_test, y_test, act_test, X_cnn_test, X_lstm_test = self.split_test_data(X, y,
act,
X_cnn=X_cnn,
X_lstm=X_lstm,
test_indices=test_indices)
if X_test.shape[0]>0:
lin_models = LinearRegression().fit(X[self.var_lin].values, y.values.ravel())
preds = lin_models.predict(X_test[self.var_lin].values).ravel()
err = (preds - y_test.values.ravel()) / 20
rms = np.sum(np.square(err))
mae = np.mean(np.abs(err))
print('rms = %s', rms)
print('mae = %s', mae)
self.logger.info("Objective from linear models: %s", mae)
X = X.values
y = y.values / 20
act = act.values
if len(y.shape)==1:
y = y[:, np.newaxis]
if len(act.shape)==1:
act = act[:, np.newaxis]
try:
self.load(self.cluster_dir)
except:
pass
if hasattr(self, 'features') and self.static_data['train_online'] == False:
pass
else:
if self.static_data['sklearn']['fs_status'] != 'ok':
X_train, X_test1, y_train, y_test1 = split_continuous(X, y, test_size=0.15, random_state=42)
cvs = []
for _ in range(3):
X_train1 = np.copy(X_train)
y_train1 = np.copy(y_train)
X_train1, X_val, y_train1, y_val = train_test_split(X_train1, y_train1, test_size=0.15)
cvs.append([X_train1, y_train1, X_val, y_val, X_test1, y_test1])
self.find_features(cvs, self.static_data['sklearn']['fs_method'], self.static_data['sklearn']['njobs'])
cvs, mask_test1, X, y, act, X_cnn, X_lstm = self.split_dataset(X, y, act, X_cnn, X_lstm)
self.indices = indices[:X.shape[0]]
for i in range(3):
cvs[i][0] = cvs[i][0][:, self.features]
cvs[i][2] = cvs[i][2][:, self.features]
cvs[i][4] = cvs[i][4][:, self.features]
self.logger.info('Data info for Global models')
self.logger.info('Number of variables %s', str(self.D))
self.logger.info('Number of total samples %s', str(self.N_tot))
self.logger.info('Number of training samples %s', str(self.N_train))
self.logger.info('Number of validation samples %s', str(self.N_val))
self.logger.info('Number of testing samples %s', str(self.N_test))
self.logger.info('/n')
self.models = dict()
for method in self.static_data['project_methods'].keys():
if self.static_data['project_methods'][method]['Global'] == True:
self.logger.info('Training start of method %s', method)
self.logger.info('/n')
if 'sklearn_method' in self.static_data['project_methods'][method].keys():
optimize_method = self.static_data['project_methods'][method]['sklearn_method']
else:
optimize_method = []
self.fit_model(cvs, method, self.static_data, self.cluster_dir, optimize_method, X_cnn=X_cnn, X_lstm=X_lstm, y=y, rated=1)
self.logger.info('Training end of method %s', method)
comb_model = combine_model(self.static_data, self.cluster_dir, x_scaler=self.x_scaler,is_global=True)
if comb_model.istrained == False and X_test.shape[0] > 0:
comb_model.train(X_test, y_test, act_test, X_cnn_test, X_lstm_test)
predict_module = global_predict(self.static_data)
predictions = predict_module.predict(X_test.values, X_cnn=X_cnn_test, X_lstm= X_lstm_test)
result = predict_module.evaluate(predictions, y_test.values)
result.to_csv(os.path.join(self.data_dir, 'result_test.csv'))
self.logger.info('Training end for Global models')
self.logger.info('/n')
self.istrained = True
self.save(self.cluster_dir)
return self.to_dict()
def to_dict(self):
dict = {}
for k in self.__dict__.keys():
if k not in ['logger']:
dict[k] = self.__dict__[k]
return dict
    def fit_model(self, cvs, method, static_data, cluster_dir, optimize_method, X_cnn=np.array([]), X_lstm=np.array([])
import numpy as np
import pandas as pd
from cvxopt import matrix
from cvxopt import solvers
# Non verbose
solvers.options['show_progress'] = False
class qp_solver:
def __init__(self, df:pd.DataFrame, limits:np.ndarray=None, col_index:str='index'):
self.df = df.copy()
self.col_index = col_index
self.weights = df.loc[:, ~df.columns.str.match(self.col_index)].columns.to_numpy().tolist()
self.limits = limits
def _H_matrix(self):
df = self.df.copy()
df = df.loc[:, ~df.columns.str.match(self.col_index)]
N = df.shape[1]
T = df.shape[0]
colnames = df.columns.to_numpy()
H_mat = np.zeros((N, N))
for i, col_i in enumerate(colnames):
for j, col_j in enumerate(colnames):
value = np.dot(df[col_i].copy().to_numpy() ,
df[col_j].copy().to_numpy()) / T
H_mat[i, j] = value
return H_mat
def _g_matrix(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
T = df.shape[0]
colnames_not_index = df.loc[:, ~df.columns.str.match(self.col_index)].columns.to_numpy()
g_vec = np.zeros(N)
for i, col_i in enumerate(colnames_not_index):
value = np.dot(df[col_i].copy().to_numpy(),
df[self.col_index].copy().to_numpy()) / T
g_vec[i] = value
return -g_vec
def _linear_restrictions(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
A = np.repeat(1, N)
b = np.array([1])
A = np.reshape(A, (1, N))
b = np.reshape(b, (1,1))
return A,b
def _linear_inequealities(self):
df = self.df.copy()
N = df.loc[:, ~df.columns.str.match(self.col_index)].shape[1]
Z = -np.identity(N)
p = np.repeat([0], N).transpose()
        p = np.reshape(p, (N,1))
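    # Hypothetical continuation (not shown in the source): the pieces above are
    # meant to feed cvxopt's QP solver, which minimizes 0.5*w'Hw + g'w subject
    # to Zw <= p and Aw = b, e.g.:
    #   H = matrix(self._H_matrix())
    #   g = matrix(self._g_matrix())
    #   A, b = self._linear_restrictions()
    #   Z, p = self._linear_inequealities()
    #   sol = solvers.qp(H, g, matrix(Z * 1.0), matrix(p * 1.0),
    #                    matrix(A * 1.0), matrix(b * 1.0))
    #   weights = np.array(sol['x']).flatten()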
import os
from collections import defaultdict
import numpy as np
import torch
from .cell_level_analysis import CellLevelAnalysisWithTableBase, _load_image_outliers
from ..base import BatchJobOnContainer
from ..util import get_logger
from ..util.io import (open_file, image_name_to_well_name,
in_file_to_image_name, in_file_to_plate_name,
add_site_name_to_image_table)
logger = get_logger('Workflow.BatchJob.ExtractBackground')
class BackgroundFromWells(CellLevelAnalysisWithTableBase):
def __init__(self, well_list, output_table, channel_names, seg_key, **super_kwargs):
self.well_list = well_list
self.output_table = output_table
self.channel_names = channel_names
super().__init__(cell_seg_key=seg_key, table_out_keys=[output_table],
image_input_keys=channel_names, validate_cell_classification=False,
**super_kwargs)
def bg_for_channel(self, input_files, channel_name):
bg_values = []
for in_file in input_files:
with open_file(in_file, 'r') as f:
values = self.read_image(f, channel_name).flatten()
bg_values.append(values)
bg_values = np.concatenate(bg_values)
median = np.median(bg_values)
mad = np.median(np.abs(bg_values - median))
min_well = ','.join(self.well_list)
logger.info(f"{self.name}: background value {median} was extracted from wells {min_well} for {channel_name}")
col_names = [f'{channel_name}_min_well', f'{channel_name}_median', f'{channel_name}_mad']
values = [min_well, median, mad]
return col_names, values
def run(self, input_files, output_files):
plate_name = os.path.split(self.input_folder)[1]
columns = ['plate_name']
table = [plate_name]
well_names = [image_name_to_well_name(in_file_to_image_name(in_file))
for in_file in input_files]
if not all([well in well_names for well in self.well_list]):
raise RuntimeError(f"Could not find all min wells")
inputs = [in_file for in_file, well_name in zip(input_files, well_names)
if well_name in self.well_list]
for channel in self.channel_names:
col_names, values = self.bg_for_channel(inputs, channel)
columns.extend(col_names)
table.extend(values)
table = np.array(table)[None]
with open_file(self.table_out_path, 'a') as f:
self.write_table(f, self.output_table, columns, table, force_write=True)
class BackgroundFromMinWell(BatchJobOnContainer):
def __init__(self, bg_table, output_table, channel_names,
min_background_fraction, max_background_fraction):
self.bg_table = bg_table
self.output_table = output_table
in_pattern = '*.hdf5'
super().__init__(input_pattern=in_pattern,
input_key=self.bg_table,
input_format='table',
output_key=self.output_table, output_format='table')
self.channel_names = channel_names
self.min_background_fraction = min_background_fraction
self.max_background_fraction = max_background_fraction
def run(self, input_files, output_files):
if len(input_files) != 1 or len(output_files) != 1:
raise ValueError(f"{self.name}: expect only a single table file, not {len(input_files)}")
in_file, out_file = input_files[0], output_files[0]
with open_file(in_file, 'r') as f:
col_names, table = self.read_table(f, self.bg_table)
# get the background fraction and find wells that don't
# have enough background
bg_fraction = table[:, col_names.index('background_fraction')]
well_names = table[:, col_names.index('well_name')]
invalid_wells = np.logical_or(bg_fraction < self.min_background_fraction,
bg_fraction > self.max_background_fraction)
logger.info(f"{self.name}: {invalid_wells.sum()} wells will not be considered for the min background")
logger.info(f"{self.name}: because they have a smaller background fraction than {self.min_background_fraction}")
logger.info(f"{self.name}: or a larger background fraction than {self.max_background_fraction}")
logger.debug(f"{self.name}: the following wells are invalid {well_names[invalid_wells]}")
plate_name = os.path.split(self.input_folder)[1]
out_col_names = ['plate_name']
out_table = [plate_name]
for channel_name in self.channel_names:
median_col_name = f'{channel_name}_median'
mad_col_name = f'{channel_name}_mad'
medians = table[:, col_names.index(median_col_name)]
mads = table[:, col_names.index(mad_col_name)]
medians[invalid_wells] = np.inf
min_id = np.argmin(medians)
min_well, min_median, min_mad = well_names[min_id], medians[min_id], mads[min_id]
if not np.isfinite(min_median):
raise RuntimeError(f"{self.name}: median background value is not finite")
msg = f"{self.name}: min well {min_well} with median background {min_median} for channel {channel_name}"
logger.info(msg)
out_table.extend([min_well, min_median, min_mad])
out_col_names.extend([f'{channel_name}_min_well', median_col_name, mad_col_name])
assert len(out_table) == len(out_col_names)
out_table = np.array(out_table)[None]
with open_file(out_file, 'a') as f:
self.write_table(f, self.output_table, out_col_names, out_table)
class ExtractBackground(CellLevelAnalysisWithTableBase):
def __init__(self, cell_seg_key, channel_keys,
image_outlier_table='images/outliers',
identifier=None, **super_kwargs):
self.cell_seg_key = cell_seg_key
self.channel_keys = channel_keys
self.image_outlier_table = image_outlier_table
group_name = 'backgrounds' if identifier is None else f'backgrounds_{identifier}'
self.image_table_key = 'images/' + group_name
self.well_table_key = 'wells/' + group_name
self.plate_table_key = 'plate/' + group_name
input_keys = [self.cell_seg_key] + self.channel_keys
table_out_keys = [self.image_table_key, self.well_table_key, self.plate_table_key]
super().__init__(cell_seg_key=cell_seg_key, table_out_keys=table_out_keys,
identifier=identifier, image_input_keys=input_keys,
validate_cell_classification=False,
**super_kwargs)
def get_bg_segment(self, path, device):
with open_file(path, 'r') as f:
channels = [self.read_image(f, key) for key in self.channel_keys]
cell_seg = self.read_image(f, self.cell_seg_key)
n_pixels = cell_seg.size
channels = [torch.FloatTensor(channel.astype(np.float32)).to(device) for channel in channels]
cell_seg = torch.LongTensor(cell_seg.astype(np.int32)).to(device)
bg_mask = cell_seg == 0
bg_fraction = bg_mask.sum() / float(n_pixels)
return torch.stack(channels)[:, bg_mask], bg_fraction
def get_bg_stats(self, bg_values):
if bg_values is None:
return {'median': [np.nan] * len(self.channel_keys),
'mad': [np.nan] * len(self.channel_keys)}
# bg_vales should have shape n_channels, n_pixels
bg_values = bg_values.cpu().numpy().astype(np.float32)
medians = np.median(bg_values, axis=1)
        mads = np.median(np.abs(bg_values - medians[:, None]), axis=1)
import sys
import re
from copy import deepcopy
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
import numpy.linalg as LA
from typing import Union
from .fingerprint_utils import csc_drop_zerocols
class TargetProduct(object):
def __init__(self, smi, similarity='tanimoto', verbose=False):
self.smi = smi
self.mol = Chem.MolFromSmiles(smi)
self.fp = AllChem.GetMorganFingerprint(self.mol, radius=2, useFeatures=False)
self.l2 = LA.norm(list(self.fp.GetNonzeroElements().values()))
self.l1 = LA.norm(list(self.fp.GetNonzeroElements().values()), 1)
self.similarity = similarity
self.verbose = verbose
def calc_ts(self, smi, distance=False):
""" Calculate tanimoto similarity between target molecule and predicted product arrary.
"""
try:
mol = Chem.MolFromSmiles(smi)
fp = AllChem.GetMorganFingerprint(mol, radius=2, useFeatures=False)
sim = DataStructs.TanimotoSimilarity(self.fp, fp, returnDistance=distance)
except Exception as e:
if distance:
sim = 1
else:
sim = 0
if self.verbose:
print('Original SMILES: {}'.format(smi), file=sys.stderr)
# print(e, file=sys.stderr)
return sim
def calc_l2(self, smi):
try:
mol = Chem.MolFromSmiles(smi)
fp = self.fp - AllChem.GetMorganFingerprint(mol, radius=2, useFeatures=False)
l2 = LA.norm(list(fp.GetNonzeroElements().values()))
except Exception as e:
# l2 = self.l2
l2 = 9999
if self.verbose:
print('Original SMILES: {}'.format(smi), file=sys.stderr)
# print(e, file=sys.stderr)
return l2
def calc_l1(self, smi):
try:
mol = Chem.MolFromSmiles(smi)
fp = self.fp - AllChem.GetMorganFingerprint(mol, radius=2, useFeatures=False)
l1 = LA.norm(list(fp.GetNonzeroElements().values()), 1)
except Exception as e:
# l1 = self.l1
l1 = 9999
if self.verbose:
print('Original SMILES: {}'.format(smi), file=sys.stderr)
# print(e, file=sys.stderr)
return l1
def distance(self, products_array):
products_distance = list()
if self.similarity == 'tanimoto':
for products_each_reaction in products_array:
distance_each_reaction = \
[self.calc_ts(smi, distance=True) for smi in products_each_reaction]
products_distance.append(distance_each_reaction)
elif self.similarity == 'euclidean':
for products_each_reaction in products_array:
distance_each_reaction = [self.calc_l2(smi) for smi in products_each_reaction]
products_distance.append(distance_each_reaction)
elif self.similarity == 'manhattan':
for products_each_reaction in products_array:
distance_each_reaction = [self.calc_l1(smi) for smi in products_each_reaction]
products_distance.append(distance_each_reaction)
else:
raise NotImplementedError
return pd.DataFrame(products_distance)
def likelihood(self, products_array, scores_array):
products_sim = pd.DataFrame(products_array)
if self.similarity == 'tanimoto':
products_sim = products_sim.applymap(self.calc_ts)
else:
raise NotImplementedError
        scores_array = np.exp(scores_array)
import os
import pandas as pd
import numpy as np
def cal_two_step_feature(id_loc):
id_dat = two_step_label[int(id_loc[1]):int(id_loc[2])]
label = id_dat['label']
weighted_label = id_dat['weighted_label']
    # Start computing features
length = len(id_dat)
    # Start computing label features
zero_num = np.sum(label == 0)
one_num = np.sum(label != 0)
zero_ratio = zero_num / length
one_ratio = one_num / length
    # Start computing weighted_label features
p_0 = np.min(weighted_label)
    p_25 = np.percentile(weighted_label, 25)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 13:12:06 2020
@author: peter
"""
import numpy as np
from pathlib import Path
import shutil
import json
import tifffile
import quantities as pq
import scipy.interpolate as interp
import scipy.ndimage as ndimage
import scipy.signal as signal
import pandas as pd
import datetime
import pdb
import re
import f.general_functions as gf
import f.ephys_functions as ef
def get_events_exclude_surround_events(
tc,
std,
surround_tc,
surround_std,
z_score=3,
surround_z=7,
exclude_first=0,
max_overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev = detect_events(tc, std, z_score=z_score, exclude_first=exclude_first)
surrounds_ev = detect_events(
tc, std, z_score=surround_z, exclude_first=exclude_first
)
excluded_dict = {}
dict_drop = []
for key in ev.keys():
if type(key) == str:
continue
if key not in surrounds_ev.keys():
continue
sur_e = surrounds_ev[key].T
e = ev[key].T
# if a detected surround event overlaps for more than max_overlap, then remove
# detects any overlaps
overlapping = np.logical_and(
e[:, 0, None] < sur_e[None, :, 1], e[:, 1, None] >= sur_e[None, :, 0]
)
if not np.any(overlapping):
continue
drop = []
wh = np.where(overlapping)
# now detect size of overlap and delete if proportionally greater than max overlap
for idx in range(len(wh[0])):
overlap = min(e[wh[0][idx], 1], sur_e[wh[1][idx], 1]) - max(
e[wh[0][idx], 0], sur_e[wh[1][idx], 0]
)
if overlap > max_overlap * (e[wh[0][idx], 1] - e[wh[0][idx], 0]):
drop.append(wh[0][idx])
# pdb.set_trace()
exc_e = np.array([x for ii, x in enumerate(e) if ii in drop])
keep_e = np.array([x for ii, x in enumerate(e) if ii not in drop])
excluded_dict[key] = exc_e.T
if len(keep_e) > 0:
ev[key] = keep_e.T
else:
dict_drop.append(key)
# delete empty fields
for key in dict_drop:
del ev[key]
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
# include the surround data
ev["surround_events"] = surrounds_ev
ev["excluded_events"] = excluded_dict
return ev
def get_events_exclude_simultaneous_events(
tc,
std,
z_score=3,
exclude_first=0,
max_events=5,
overlap=0.75,
excluded_circle=None,
excluded_dead=None,
):
ev, excluded_dict = detect_events_remove_simultaneous(
tc,
std,
z_score=z_score,
exclude_first=exclude_first,
max_overlap=overlap,
max_events=max_events,
)
# exclude ROIs on edge of illumination
if excluded_circle is not None:
circle_dict = {}
for idx in excluded_circle:
if idx in ev.keys():
circle_dict[idx] = ev[idx]
del ev[idx]
ev["excluded_circle_events"] = circle_dict
# exclude ROIs on edge of illumination
if excluded_dead is not None:
dead_dict = {}
if len(excluded_dead) > 0:
for idx in excluded_dead:
if idx in ev.keys():
dead_dict[idx] = ev[idx]
del ev[idx]
else:
pass
ev["excluded_dead_events"] = dead_dict
ev["excluded_events"] = excluded_dict
ev["surround_events"] = excluded_dict
print("Check this - surrounds and exclude the same")
return ev
def detect_events_remove_simultaneous(
tc, std, z_score=3, exclude_first=0, max_events=5, max_overlap=0.5
):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
# now count simultaneous events and remove those where they are
num_events = np.sum(events, 0)
excluded_events = num_events > max_events
excluded_time = np.where(excluded_events)[0]
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
excluded_result = {}
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
overlap = np.sum(np.isin(llocs, excluded_time).astype(int)) / len(llocs)
if overlap > max_overlap:
excluded_result[idx] = corr_locs.T
else:
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result, excluded_result
def get_surround_masks(masks, surround_rad=20, dilate=True):
def get_bounding_circle_radius(masks):
rows, cols = np.any(masks, axis=-1), np.any(masks, axis=-2)
rs = np.apply_along_axis(first_last, -1, rows)
cs = np.apply_along_axis(first_last, -1, cols)
centers = np.array(
[rs[:, 0] + (rs[:, 1] - rs[:, 0]) / 2, cs[:, 0] + (cs[:, 1] - cs[:, 0]) / 2]
).T
# bounding radius is the hypotenuse /2
        radii = np.sqrt((cs[:, 1] - cs[:, 0]) ** 2 + (rs[:, 1] - rs[:, 0]) ** 2) / 2
return radii, centers
def first_last(arr_1d):
return np.where(arr_1d)[0][[0, -1]]
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
roi_rads, centers = get_bounding_circle_radius(dilated_masks)
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_xor(
dilated_masks, rs < roi_rads[:, None, None] + surround_rad
)
return surround_roi
def get_surround_masks_cellfree(masks, surround_rad=50, dilate=True):
all_masks = np.any(masks, axis=0)
# avoid border effects/bleedthrough by dilating existing rois
structure = np.ones((3, 3, 3))
structure[0::2, ...] = 0
dilated_masks = ndimage.binary_dilation(masks, structure=structure, iterations=4)
centers = np.array([ndimage.center_of_mass(m) for m in dilated_masks])
x, y = np.indices(masks.shape[-2:])
rs = np.sqrt(
(x[None, ...] - centers[:, 0, None, None]) ** 2
+ (y[None, ...] - centers[:, 1, None, None]) ** 2
)
surround_roi = np.logical_and(~all_masks, rs < surround_rad)
# see if the area is too small
areas = np.sum(surround_roi, axis=(-2, -1))
# check nowhere too small
small = areas < 2000
if np.any(small):
for new_rs in range(surround_rad, 2 * surround_rad, 10):
small = areas < 2000
surround_roi[small] = np.logical_and(~all_masks, rs[small, ...] < new_rs)
if not np.any(small):
break
small = areas < 2000
# revert back to normal behaviour - just take an area around and dont care about cells
if np.any(small):
surround_roi[small] = np.logical_and(masks[small], rs[small, ...] < new_rs)
return surround_roi
def get_observation_length(event_dict):
tc = event_dict["tc_filt"]
exclude_dict = event_dict["surround_events"]
length = tc.shape[1]
lengths = []
# count as non-observed any time during a surround event
for i in range(tc.shape[0]):
if i in exclude_dict.keys():
lengths.append(
length - np.sum(exclude_dict[i].T[:, 1] - exclude_dict[i].T[:, 0])
)
else:
lengths.append(length)
return np.array(lengths)
def apply_exclusion(exclude_dict, tc):
excluded_tc = np.copy(tc)
for roi in exclude_dict.keys():
for i in range(exclude_dict[roi].shape[-1]):
ids = exclude_dict[roi][:, i]
excluded_tc[roi, ids[0] : ids[1]] = 1
return excluded_tc
def soft_threshold(arr, thresh, to=1):
# Thresholds towards to value
res = np.copy(arr)
wh = np.where(np.abs(arr - to) < thresh)
n_wh = np.where(np.abs(arr - to) >= thresh)
sgn = np.sign(arr - to)
res[wh] = to
res[n_wh] -= sgn[n_wh] * thresh
return res
def split_event(t, ids):
# splits a zero-(actually 1) crossing event into multiple non-zero crossing events recursively
# removes one point
if not np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
return [tuple(ids)]
else:
zer_loc = np.argmin(np.abs(t[ids[0] : ids[1]] - 1)) + ids[0]
return split_event(t, (ids[0], zer_loc)) + split_event(t, (zer_loc + 1, ids[1]))
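# Example (illustrative): if t crosses 1 inside (ids[0], ids[1]), the event is
# split at the sample closest to 1 so that each sub-event stays on one side:
#   t = np.array([1.5, 1.4, 1.0, 0.6, 0.7])
#   split_event(t, (0, 5))   # -> [(0, 2), (3, 5)]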
def correct_event_signs(t, llocs):
corr_locs = []
for id_idx, ids in enumerate(llocs):
if np.logical_and(
np.any(t[ids[0] : ids[1]] - 1 > 0), np.any(t[ids[0] : ids[1]] - 1 < 0)
):
split_ids = split_event(t, ids)
corr_locs.extend(split_ids)
else:
corr_locs.append(ids)
corr_locs = np.array(corr_locs)
# if we have split into a zero size (due to boundary issue in split events), remove
if np.any((corr_locs[:, 1] - corr_locs[:, 0]) < 1):
corr_locs = corr_locs[(corr_locs[:, 1] - corr_locs[:, 0]) > 0]
return corr_locs
def recursive_split_locs(seq):
# splits a sequence into n adjacent sequences
diff = np.diff(seq)
if not np.any(diff != 1):
return [(seq[0], seq[-1])]
else:
wh = np.where(diff != 1)[0][0] + 1
return recursive_split_locs(seq[:wh]) + recursive_split_locs(seq[wh:])
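# Example: recursive_split_locs(np.array([1, 2, 3, 7, 8])) -> [(1, 3), (7, 8)]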
def detect_events(tc, std, z_score=3, exclude_first=0):
tc_filt = ndimage.gaussian_filter(tc, (0, 3))
std_filt = ndimage.gaussian_filter(std, (0, 3))
tc_filt[:, :exclude_first] = 1
events = np.abs(tc_filt - 1) > z_score * std_filt
# Use closing to join split events and remove small events
struc = np.zeros((3, 5))
struc[1, :] = 1
events = ndimage.binary_opening(events, structure=struc, iterations=2)
events = ndimage.binary_closing(events, structure=struc, iterations=2)
wh = np.where(events)
idxs, locs = np.unique(wh[0], return_index=True)
locs = np.append(locs, len(wh[0]))
result = {}
for i, idx in enumerate(idxs):
llocs = wh[1][locs[i] : locs[i + 1]]
split_locs = np.array(recursive_split_locs(llocs))
# check if they have both positive and negative going - messes with integration later
t = tc_filt[idx, :]
corr_locs = correct_event_signs(t, split_locs)
result[idx] = corr_locs.T
result["tc_filt"] = tc_filt
result["tc"] = tc
return result
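# Minimal synthetic usage sketch (assumed shapes, not from the original code):
# detect_events takes per-roi traces normalised around 1 plus matching noise
# estimates, and maps each roi index to a 2 x n_events array of event bounds.
def _demo_detect_events():
    tc = np.ones((1, 200))
    tc[0, 80:120] = 1.5                # one clear positive-going event
    std = np.full_like(tc, 0.02)       # flat per-sample noise estimate
    events = detect_events(tc, std, z_score=3)
    return events[0]                   # bounds of the detected event(s) for roi 0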
def get_event_properties(event_dict, use_filt=True):
    # use the filtered trace when use_filt is set
    if use_filt:
        t = event_dict["tc_filt"]
    else:
        t = event_dict["tc"]
result_dict = {}
for idx in event_dict.keys():
if type(idx) == str:
continue
event_properties = []
for locs in event_dict[idx].T:
if np.logical_and(
np.any(t[idx, locs[0] : locs[1]] - 1 > 0),
np.any(t[idx, locs[0] : locs[1]] - 1 < 0),
):
print(idx, locs)
                raise ValueError("This shouldn't happen")
event_length = locs[1] - locs[0]
event_amplitude = (
t[idx, np.argmax(np.abs(t[idx, locs[0] : locs[1]] - 1)) + locs[0]] - 1
)
event_integrated = np.sum(t[idx, locs[0] : locs[1]] - 1)
event_properties.append([event_length, event_amplitude, event_integrated])
        if len(event_properties) > 0:
            # assumed completion of a truncated line: store the per-roi properties
            result_dict[idx] = np.array(event_properties)
    return result_dict
# Code from Hayes 2016- k- fingerprinting
import math
import numpy as np
# re-seed the generator
#np.random.seed(1234)
#1. dictionary_() will extract features and write them to a target file (kFPdict) in the data folder
#2. calls RF_openworld(), which starts by dividing kFPdict into training and testing sets
#3. # -1 is IN, 1 is OUT
#file format: "time direction size" (the code below reads the timestamp from column 0 and the signed direction from column 1)
"""Feeder functions"""
def neighborhood(iterable):
iterator = iter(iterable)
    prev = 0
item = next(iterator) # throws StopIteration if empty.
for nex in iterator:
yield (prev,item,nex)
prev = item
item = nex
yield (prev,item,None)
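# Illustrative usage sketch (not part of the original source): neighborhood
# yields each item with its predecessor and successor, padding the two ends
# with 0 and None respectively.
def _demo_neighborhood():
    return list(neighborhood([10, 20, 30]))
    # -> [(0, 10, 20), (10, 20, 30), (20, 30, None)]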
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
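# Illustrative usage sketch (not part of the original source): chunkIt splits a
# sequence into `num` roughly equal, contiguous chunks.
def _demo_chunkIt():
    return chunkIt(list(range(7)), 3)  # -> [[0, 1], [2, 3], [4, 5, 6]]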
"""Non-feeder functions"""
def get_pkt_list(trace_data):
first_line = trace_data[0].rstrip()
first_line = first_line.split("\t")
first_time = float(first_line[0])
dta = []
for line in trace_data:
if "##HOST_FTS" in line:
continue
a = line.rstrip()
b = a.split("\t")
if "e-" in b[0]:
dr = b[1]
print("Exponent in total seconds: ", b)
b = [0.0, dr]
#print(b, float(b[0])- first_time)
if float(b[1]) > 0:
#dta.append(((float(b[0])- first_time), abs(int(b[2])), 1))
dta.append(((float(b[0])- first_time), 1))
else:
#dta.append(((float(b[1]) - first_time), abs(int(b[2])), -1))
dta.append(((float(b[0]) - first_time), -1))
return dta
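# Illustrative usage sketch (assumed trace lines, not from the original code):
# each line carries a timestamp and a signed direction separated by a tab;
# get_pkt_list rebases times to the first packet and keeps the +/-1 direction.
def _demo_get_pkt_list():
    trace = ["0.50\t1\n", "0.75\t-1\n", "1.00\t1\n"]
    return get_pkt_list(trace)  # -> [(0.0, 1), (0.25, -1), (0.5, 1)]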
def In_Out(list_data):
In = []
Out = []
for p in list_data:
if p[1] == -1:
In.append(p)
if p[1] == 1:
Out.append(p)
return In, Out
############### TIME FEATURES #####################
def inter_pkt_time(list_data):
times = [x[0] for x in list_data]
temp = []
#print(times)
#print(times[1:]+[times[0]])
for elem,next_elem in zip(times, times[1:]+[times[0]]):
temp.append(next_elem-elem)
return temp[:-1]
def interarrival_times(list_data):
In, Out = In_Out(list_data)
IN = inter_pkt_time(In)
OUT = inter_pkt_time(Out)
TOTAL = inter_pkt_time(list_data)
return IN, OUT, TOTAL
def interarrival_maxminmeansd_stats(list_data):
interstats = []
In, Out, Total = interarrival_times(list_data)
if In and Out:
avg_in = sum(In)/float(len(In))
avg_out = sum(Out)/float(len(Out))
avg_total = sum(Total)/float(len(Total))
interstats.append((max(In), max(Out), max(Total), avg_in, avg_out, avg_total, np.std(In), np.std(Out), np.std(Total), np.percentile(In, 75), np.percentile(Out, 75), np.percentile(Total, 75)))
elif Out and not In:
avg_out = sum(Out)/float(len(Out))
avg_total = sum(Total)/float(len(Total))
interstats.append((0, max(Out), max(Total), 0, avg_out, avg_total, 0, np.std(Out), np.std(Total), 0, np.percentile(Out, 75), np.percentile(Total, 75)))
elif In and not Out:
avg_in = sum(In)/float(len(In))
avg_total = sum(Total)/float(len(Total))
interstats.append((max(In), 0, max(Total), avg_in, 0, avg_total, np.std(In), 0, np.std(Total), np.percentile(In, 75), 0, np.percentile(Total, 75)))
    else:
        # no packets at all: emit the same 12 interarrival features as the branches above
        interstats.append(tuple([0] * 12))
return interstats
def time_percentile_stats(trace_data):
Total = get_pkt_list(trace_data)
In, Out = In_Out(Total)
In1 = [x[0] for x in In]
Out1 = [x[0] for x in Out]
Total1 = [x[0] for x in Total]
STATS = []
if In1:
STATS.append(np.percentile(In1, 25)) # return 25th percentile
STATS.append(np.percentile(In1, 50))
STATS.append(np.percentile(In1, 75))
STATS.append(np.percentile(In1, 100))
if not In1:
STATS.extend(([0]*4))
if Out1:
STATS.append(np.percentile(Out1, 25)) # return 25th percentile
STATS.append(np.percentile(Out1, 50))
STATS.append(np.percentile(Out1, 75))
STATS.append(np.percentile(Out1, 100))
if not Out1:
STATS.extend(([0]*4))
if Total1:
STATS.append(np.percentile(Total1, 25)) # return 25th percentile
STATS.append(np.percentile(Total1, 50))
STATS.append(np.percentile(Total1, 75))
STATS.append(np.percentile(Total1, 100))
if not Total1:
STATS.extend(([0]*4))
return STATS
def number_pkt_stats(trace_data):
Total = get_pkt_list(trace_data)
In, Out = In_Out(Total)
return len(In), len(Out), len(Total)
def first_and_last_30_pkts_stats(trace_data):
Total = get_pkt_list(trace_data)
first30 = Total[:30]
last30 = Total[-30:]
first30in = []
first30out = []
for p in first30:
if p[1] == -1:
first30in.append(p)
if p[1] == 1:
first30out.append(p)
last30in = []
last30out = []
for p in last30:
if p[1] == -1:
last30in.append(p)
if p[1] == 1:
last30out.append(p)
stats= []
stats.append(len(first30in))
stats.append(len(first30out))
stats.append(len(last30in))
stats.append(len(last30out))
return stats
#concentration of outgoing packets in chunks of 20 packets
def pkt_concentration_stats(trace_data):
Total = get_pkt_list(trace_data)
chunks= [Total[x:x+20] for x in range(0, len(Total), 20)]
concentrations = []
for item in chunks:
c = 0
for p in item:
if p[1] == 1:
c+=1
concentrations.append(c)
return np.std(concentrations), sum(concentrations)/float(len(concentrations)), np.percentile(concentrations, 50), min(concentrations), max(concentrations), concentrations
#Average number packets sent and received per second
def number_per_sec(trace_data):
Total = get_pkt_list(trace_data)
#print(Total)
last_time = Total[-1][0]
last_second = math.ceil(last_time)
#print("Last time and second", last_time, last_second)
temp = []
l = []
for i in range(1, int(last_second)+1):
c = 0
for p in Total:
if p[0] <= i:
c+=1
temp.append(c)
for prev,item,next in neighborhood(temp):
x = item - prev
l.append(x)
#if not len(l) == 0:
avg_number_per_sec = sum(l)/float(len(l))
#else:
#avg_number_per_sec = 0
return avg_number_per_sec, np.std(l), np.percentile(l, 50), min(l), max(l), l
#Variant of packet ordering features from http://cacr.uwaterloo.ca/techreports/2014/cacr2014-05.pdf
def avg_pkt_ordering_stats(trace_data):
Total = get_pkt_list(trace_data)
c1 = 0
c2 = 0
temp1 = []
temp2 = []
for p in Total:
        if p[1] == -1:  # temp1 records the overall position of each incoming packet
            temp1.append(c1)
        c1 += 1
        if p[1] == 1:
            temp2.append(c2)
        c2 += 1
avg_in = sum(temp1)/float(len(temp1))
avg_out = sum(temp2)/float(len(temp2))
return avg_in, avg_out, np.std(temp1), np.std(temp2)
def perc_inc_out(trace_data):
Total = get_pkt_list(trace_data)
In, Out = In_Out(Total)
percentage_in = len(In)/float(len(Total))
percentage_out = len(Out)/float(len(Total))
return percentage_in, percentage_out
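# Illustrative usage sketch (assumed toy trace, not from the original code):
# the time features above all consume the same tab-separated trace lines.
def _demo_time_features():
    trace = ["0.0\t1\n", "0.1\t-1\n", "0.2\t1\n", "0.9\t-1\n"]
    n_in, n_out, n_total = number_pkt_stats(trace)  # -> 2, 2, 4
    frac_in, frac_out = perc_inc_out(trace)         # -> 0.5, 0.5
    return n_in, n_out, n_total, frac_in, frac_out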
############### SIZE FEATURES #####################
def total_size(list_data):
return sum([x[1] for x in list_data])
def in_out_size(list_data):
In, Out = In_Out(list_data)
size_in = sum([x[1] for x in In])
size_out = sum([x[1] for x in Out])
return size_in, size_out
def average_total_pkt_size(list_data):
return np.mean([x[1] for x in list_data])
def average_in_out_pkt_size(list_data):
In, Out = In_Out(list_data)
average_size_in = np.mean([x[1] for x in In])
average_size_out = np.mean([x[1] for x in Out])
return average_size_in, average_size_out
def variance_total_pkt_size(list_data):
return np.var([x[1] for x in list_data])
def variance_in_out_pkt_size(list_data):
In, Out = In_Out(list_data)
var_size_in = np.var([x[1] for x in In])
    var_size_out = np.var([x[1] for x in Out])
    # assumed completion by symmetry with average_in_out_pkt_size above
    return var_size_in, var_size_out
# deafrica_classificationtools.py
'''
Description: This file contains a set of python functions for conducting
machine learning classification on remote sensing data from Digital Earth
Africa's Open Data Cube
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Africa data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/issues
Last modified: September 2020
'''
import os
import sys
import joblib
import datacube
import rasterio
import numpy as np
import xarray as xr
from tqdm import tqdm
import dask.array as da
import geopandas as gpd
from copy import deepcopy
import multiprocessing as mp
import dask.distributed as dd
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from sklearn.cluster import KMeans
from sklearn.base import clone
from datacube.utils import masking
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from abc import ABCMeta, abstractmethod
from datacube.utils import geometry
from sklearn.base import ClusterMixin
from dask.diagnostics import ProgressBar
from rasterio.features import rasterize
from sklearn.impute import SimpleImputer
from rasterio.features import geometry_mask
from dask_ml.wrappers import ParallelPostFit
from sklearn.mixture import GaussianMixture
from datacube.utils.geometry import assign_crs
from datacube_stats.statistics import GeoMedian
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import BaseCrossValidator
import warnings
from dea_tools.spatial import xr_rasterize
from dea_tools.bandindices import calculate_indices
from dea_tools.datahandling import load_ard, mostcommon_crs
def sklearn_flatten(input_xr):
"""
Reshape a DataArray or Dataset with spatial (and optionally
temporal) structure into an np.array with the spatial and temporal
dimensions flattened into one dimension.
This flattening procedure enables DataArrays and Datasets to be used
to train and predict
with sklearn models.
Last modified: September 2019
Parameters
----------
input_xr : xarray.DataArray or xarray.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
flattening.
Returns
----------
input_np : numpy.array
A numpy array corresponding to input_xr.data (or
input_xr.to_array().data), with dimensions 'x','y' and 'time'
flattened into a single dimension, which is the first axis of
the returned array. input_np contains no NaNs.
"""
# cast input Datasets to DataArray
if isinstance(input_xr, xr.Dataset):
input_xr = input_xr.to_array()
# stack across pixel dimensions, handling timeseries if necessary
if 'time' in input_xr.dims:
stacked = input_xr.stack(z=['x', 'y', 'time'])
else:
stacked = input_xr.stack(z=['x', 'y'])
# finding 'bands' dimensions in each pixel - these will not be
# flattened as their context is important for sklearn
pxdims = []
for dim in stacked.dims:
if dim != 'z':
pxdims.append(dim)
# mask NaNs - we mask pixels with NaNs in *any* band, because
# sklearn cannot accept NaNs as input
mask = np.isnan(stacked)
if len(pxdims) != 0:
mask = mask.any(dim=pxdims)
# turn the mask into a numpy array (boolean indexing with xarrays
# acts weird)
mask = mask.data
# the dimension we are masking along ('z') needs to be the first
# dimension in the underlying np array for the boolean indexing to work
stacked = stacked.transpose('z', *pxdims)
input_np = stacked.data[~mask]
return input_np
def sklearn_unflatten(output_np, input_xr):
"""
Reshape a numpy array with no 'missing' elements (NaNs) and
'flattened' spatiotemporal structure into a DataArray matching the
spatiotemporal structure of the DataArray
This enables an sklearn model's prediction to be remapped to the
correct pixels in the input DataArray or Dataset.
Last modified: September 2019
Parameters
----------
output_np : numpy.array
The first dimension's length should correspond to the number of
valid (non-NaN) pixels in input_xr.
input_xr : xarray.DataArray or xarray.Dataset
Must have dimensions 'x' and 'y', may have dimension 'time'.
Dimensions other than 'x', 'y' and 'time' are unaffected by the
flattening.
Returns
----------
output_xr : xarray.DataArray
An xarray.DataArray with the same dimensions 'x', 'y' and 'time'
as input_xr, and the same valid (non-NaN) pixels. These pixels
are set to match the data in output_np.
"""
# the output of a sklearn model prediction should just be a numpy array
# with size matching x*y*time for the input DataArray/Dataset.
# cast input Datasets to DataArray
if isinstance(input_xr, xr.Dataset):
input_xr = input_xr.to_array()
# generate the same mask we used to create the input to the sklearn model
if 'time' in input_xr.dims:
stacked = input_xr.stack(z=['x', 'y', 'time'])
else:
stacked = input_xr.stack(z=['x', 'y'])
pxdims = []
for dim in stacked.dims:
if dim != 'z':
pxdims.append(dim)
mask = np.isnan(stacked)
if len(pxdims) != 0:
mask = mask.any(dim=pxdims)
# handle multivariable output
output_px_shape = ()
if len(output_np.shape[1:]):
output_px_shape = output_np.shape[1:]
# use the mask to put the data in all the right places
output_ma = np.ma.empty((len(stacked.z), *output_px_shape))
output_ma[~mask] = output_np
output_ma[mask] = np.ma.masked
# set the stacked coordinate to match the input
output_xr = xr.DataArray(
output_ma,
coords={'z': stacked['z']},
dims=[
'z',
*['output_dim_' + str(idx) for idx in range(len(output_px_shape))]
])
output_xr = output_xr.unstack()
return output_xr
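# Minimal usage sketch (assumed data, not from the original module):
# sklearn_flatten/sklearn_unflatten round-trip an xarray object through the
# flat (n_samples, n_features) layout scikit-learn expects.
def _demo_flatten_roundtrip():
    da_in = xr.DataArray(np.random.rand(2, 4, 4), dims=('band', 'y', 'x'))
    flat = sklearn_flatten(da_in)         # shape (16, 2): one row per pixel
    out = sklearn_unflatten(flat, da_in)  # back onto the original x/y grid
    return flat.shape, out.dims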
def fit_xr(model, input_xr):
"""
Utilise our wrappers to fit a vanilla sklearn model.
Last modified: September 2019
Parameters
----------
model : scikit-learn model or compatible object
Must have a fit() method that takes numpy arrays.
input_xr : xarray.DataArray or xarray.Dataset.
Must have dimensions 'x' and 'y', may have dimension 'time'.
Returns
----------
model : a scikit-learn model which has been fitted to the data in
the pixels of input_xr.
"""
model = model.fit(sklearn_flatten(input_xr))
return model
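# Minimal usage sketch (assumed data, not from the original module): fit an
# unsupervised sklearn model directly on the pixels of an xarray object.
def _demo_fit_xr():
    da_in = xr.DataArray(np.random.rand(3, 8, 8), dims=('band', 'y', 'x'))
    km = fit_xr(KMeans(n_clusters=2), da_in)
    return km.cluster_centers_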
def predict_xr(model,
input_xr,
chunk_size=None,
persist=False,
proba=False,
clean=False,
return_input=False):
"""
Using dask-ml ParallelPostfit(), runs the parallel
predict and predict_proba methods of sklearn
estimators. Useful for running predictions
on a larger-than-RAM datasets.
Last modified: September 2020
Parameters
----------
model : scikit-learn model or compatible object
Must have a .predict() method that takes numpy arrays.
input_xr : xarray.DataArray or xarray.Dataset.
Must have dimensions 'x' and 'y'
chunk_size : int
The dask chunk size to use on the flattened array. If this
is left as None, then the chunks size is inferred from the
.chunks method on the `input_xr`
persist : bool
If True, and proba=True, then 'input_xr' data will be
loaded into distributed memory. This will ensure data
is not loaded twice for the prediction of probabilities,
but this will only work if the data is not larger than
distributed RAM.
proba : bool
If True, predict probabilities
clean : bool
If True, remove Infs and NaNs from input and output arrays
return_input : bool
If True, then the data variables in the 'input_xr' dataset will
be appended to the output xarray dataset.
Returns
----------
output_xr : xarray.Dataset
An xarray.Dataset containing the prediction output from model.
if proba=True then dataset will also contain probabilites, and
if return_input=True then dataset will have the input feature layers.
Has the same spatiotemporal structure as input_xr.
"""
# if input_xr isn't dask, coerce it
dask = True
if not bool(input_xr.chunks):
dask = False
input_xr = input_xr.chunk({'x': len(input_xr.x), 'y': len(input_xr.y)})
#set chunk size if not supplied
if chunk_size is None:
chunk_size = int(input_xr.chunks['x'][0]) * \
int(input_xr.chunks['y'][0])
def _predict_func(model, input_xr, persist, proba, clean, return_input):
x, y, crs = input_xr.x, input_xr.y, input_xr.geobox.crs
input_data = []
for var_name in input_xr.data_vars:
input_data.append(input_xr[var_name])
input_data_flattened = []
for arr in input_data:
data = arr.data.flatten().rechunk(chunk_size)
input_data_flattened.append(data)
# reshape for prediction
input_data_flattened = da.array(input_data_flattened).transpose()
if clean == True:
input_data_flattened = da.where(da.isfinite(input_data_flattened),
input_data_flattened, 0)
if (proba == True) & (persist == True):
# persisting data so we don't require loading all the data twice
input_data_flattened = input_data_flattened.persist()
# apply the classification
print('predicting...')
out_class = model.predict(input_data_flattened)
# Mask out NaN or Inf values in results
if clean == True:
out_class = da.where(da.isfinite(out_class), out_class, 0)
# Reshape when writing out
out_class = out_class.reshape(len(y), len(x))
# stack back into xarray
output_xr = xr.DataArray(out_class,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr = output_xr.to_dataset(name='Predictions')
if proba == True:
print(" probabilities...")
out_proba = model.predict_proba(input_data_flattened)
# convert to %
out_proba = da.max(out_proba, axis=1) * 100.0
if clean == True:
out_proba = da.where(da.isfinite(out_proba), out_proba, 0)
out_proba = out_proba.reshape(len(y), len(x))
out_proba = xr.DataArray(out_proba,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr['Probabilities'] = out_proba
if return_input == True:
print(" input features...")
# unflatten the input_data_flattened array and append
            # to the output_xr containing the predictions
arr = input_xr.to_array()
stacked = arr.stack(z=['y', 'x'])
# handle multivariable output
output_px_shape = ()
if len(input_data_flattened.shape[1:]):
output_px_shape = input_data_flattened.shape[1:]
output_features = input_data_flattened.reshape(
(len(stacked.z), *output_px_shape))
# set the stacked coordinate to match the input
output_features = xr.DataArray(
output_features,
coords={
'z': stacked['z']
},
dims=[
'z', *[
'output_dim_' + str(idx)
for idx in range(len(output_px_shape))
]
]).unstack()
# convert to dataset and rename arrays
output_features = output_features.to_dataset(dim='output_dim_0')
data_vars = list(input_xr.data_vars)
output_features = output_features.rename(
{i: j for i, j in zip(output_features.data_vars, data_vars)})
# merge with predictions
output_xr = xr.merge([output_xr, output_features],
compat='override')
return assign_crs(output_xr, str(crs))
if dask == True:
# convert model to dask predict
model = ParallelPostFit(model)
with joblib.parallel_backend('dask'):
output_xr = _predict_func(model, input_xr, persist, proba, clean,
return_input)
else:
output_xr = _predict_func(model, input_xr, persist, proba, clean,
return_input).compute()
return output_xr
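# Minimal usage sketch (the model and dataset are assumed inputs, not from the
# original module): predict_xr maps a fitted classifier over every pixel.
def _demo_predict_xr(model, ds):
    # `model` is any fitted sklearn classifier; `ds` is an xr.Dataset of
    # feature layers with x/y dims (e.g. from dc.load)
    predicted = predict_xr(model, ds, proba=True, clean=True)
    return predicted.Predictions, predicted.Probabilities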
class HiddenPrints:
"""
For concealing unwanted print statements called by other functions
"""
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
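# Illustrative usage sketch (not part of the original module): print output
# inside the context manager is swallowed, then stdout is restored.
def _demo_hidden_prints():
    with HiddenPrints():
        print("this line never reaches stdout")
    print("printing works again here")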
def _get_training_data_for_shp(gdf,
index,
row,
out_arrs,
out_vars,
products,
dc_query,
return_coords,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None):
"""
This is the core function that is triggered by `collect_training_data`.
The `collect_training_data` function loops through geometries in a geopandas
geodataframe and runs the code within `_get_training_data_for_shp`.
Parameters are inherited from `collect_training_data`.
See that function for information on the other params not listed below.
Parameters
----------
index, row : iterables inherited from geopandas object
out_arrs : list
An empty list into which the training data arrays are stored.
out_vars : list
        An empty list into which the data variable names are stored.
Returns
--------
Two lists, a list of numpy.arrays containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# prevent function altering dictionary kwargs
dc_query = deepcopy(dc_query)
    # remove dask chunks if supplied, since multiprocessing
    # is used for parallelization
if 'dask_chunks' in dc_query.keys():
dc_query.pop('dask_chunks', None)
# connect to datacube
dc = datacube.Datacube(app='training_data')
# set up query based on polygon
geom = geometry.Geometry(geom=gdf.iloc[index].geometry, crs=gdf.crs)
q = {"geopolygon": geom}
# merge polygon query with user supplied query params
dc_query.update(q)
# load_ard doesn't handle derivative products, so check
# products aren't one of those below
others = [
'ls5_nbart_geomedian_annual', 'ls7_nbart_geomedian_annual',
'ls8_nbart_geomedian_annual', 'ls5_nbart_tmad_annual',
'ls7_nbart_tmad_annual', 'ls8_nbart_tmad_annual',
'landsat_barest_earth', 'ls8_barest_earth_albers'
]
if products[0] in others:
ds = dc.load(product=products[0], **dc_query)
ds = ds.where(ds != 0, np.nan)
else:
# load data
with HiddenPrints():
ds = load_ard(dc=dc, products=products, **dc_query)
# create polygon mask
with HiddenPrints():
mask = xr_rasterize(gdf.iloc[[index]], ds)
# Use custom function for training data if it exists
if custom_func is not None:
with HiddenPrints():
data = custom_func(ds)
data = data.where(mask)
else:
# mask dataset
ds = ds.where(mask)
# first check enough variables are set to run functions
if (len(ds.time.values) > 1) and (reduce_func == None):
raise ValueError(
"You're dataset has " + str(len(ds.time.values)) +
" time-steps, please provide a time reduction function," +
" e.g. reduce_func='mean'")
if calc_indices is not None:
# determine which collection is being loaded
if products[0] in others:
collection = 'ga_ls_2'
elif '3' in products[0]:
collection = 'ga_ls_3'
elif 's2' in products[0]:
collection = 'ga_s2_1'
if len(ds.time.values) > 1:
if reduce_func in ['mean', 'median', 'std', 'max', 'min']:
with HiddenPrints():
data = calculate_indices(ds,
index=calc_indices,
drop=drop,
collection=collection)
# getattr is equivalent to calling data.reduce_func
method_to_call = getattr(data, reduce_func)
data = method_to_call(dim='time')
elif reduce_func == 'geomedian':
data = GeoMedian().compute(ds)
with HiddenPrints():
data = calculate_indices(data,
index=calc_indices,
drop=drop,
collection=collection)
else:
raise Exception(
reduce_func + " is not one of the supported" +
" reduce functions ('mean','median','std','max','min', 'geomedian')"
)
else:
with HiddenPrints():
data = calculate_indices(ds,
index=calc_indices,
drop=drop,
collection=collection)
# when band indices are not required, reduce the
# dataset to a 2d array through means or (geo)medians
if calc_indices is None:
if len(ds.time.values) > 1:
if reduce_func == 'geomedian':
data = GeoMedian().compute(ds)
elif reduce_func in ['mean', 'median', 'std', 'max', 'min']:
method_to_call = getattr(ds, reduce_func)
data = method_to_call('time')
else:
data = ds.squeeze()
if return_coords == True:
# turn coords into a variable in the ds
data['x_coord'] = ds.x + 0 * ds.y
data['y_coord'] = ds.y + 0 * ds.x
if zonal_stats is None:
# If no zonal stats were requested then extract all pixel values
flat_train = sklearn_flatten(data)
flat_val = np.repeat(row[field], flat_train.shape[0])
stacked = np.hstack((np.expand_dims(flat_val, axis=1), flat_train))
elif zonal_stats in ['mean', 'median', 'std', 'max', 'min']:
method_to_call = getattr(data, zonal_stats)
flat_train = method_to_call()
flat_train = flat_train.to_array()
stacked = np.hstack((row[field], flat_train))
else:
raise Exception(zonal_stats + " is not one of the supported" +
" reduce functions ('mean','median','std','max','min')")
#return unique-id so we can index if load failed silently
_id = gdf.iloc[index]['id']
# Append training data and labels to list
out_arrs.append(np.append(stacked, _id))
out_vars.append([field] + list(data.data_vars) + ['id'])
def _get_training_data_parallel(gdf,
products,
dc_query,
ncpus,
return_coords,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None):
"""
Function passing the '_get_training_data_for_shp' function
    to a multiprocessing.Pool.
Inherits variables from 'collect_training_data()'.
"""
# Check if dask-client is running
try:
zx = None
zx = dd.get_client()
except:
pass
if zx is not None:
raise ValueError(
"You have a Dask Client running, which prevents \n"
"this function from multiprocessing. Close the client.")
# instantiate lists that can be shared across processes
manager = mp.Manager()
results = manager.list()
column_names = manager.list()
# progress bar
pbar = tqdm(total=len(gdf))
def update(*a):
pbar.update()
with mp.Pool(ncpus) as pool:
for index, row in gdf.iterrows():
pool.apply_async(_get_training_data_for_shp, [
gdf, index, row, results, column_names, products, dc_query,
return_coords, custom_func, field, calc_indices, reduce_func,
drop, zonal_stats
],
callback=update)
pool.close()
pool.join()
pbar.close()
return column_names, results
def collect_training_data(gdf,
products,
dc_query,
ncpus=1,
return_coords=False,
custom_func=None,
field=None,
calc_indices=None,
reduce_func=None,
drop=True,
zonal_stats=None,
clean=True,
fail_threshold=0.02,
max_retries=3):
"""
This function executes the training data functions and tidies the results
into a 'model_input' object containing stacked training data arrays
with all NaNs & Infs removed. In the instance where ncpus > 1, a parallel version of the
function will be run (functions are passed to a mp.Pool())
This function provides a number of pre-defined feature layer methods,
including calculating band indices, reducing time series using several summary statistics,
and/or generating zonal statistics across polygons. The 'custom_func' parameter provides
a method for the user to supply a custom function for generating features rather than using the
pre-defined methods.
Parameters
----------
gdf : geopandas geodataframe
geometry data in the form of a geopandas geodataframe
products : list
a list of products to load from the datacube.
e.g. ['ls8_usgs_sr_scene', 'ls7_usgs_sr_scene']
dc_query : dictionary
Datacube query object, should not contain lat and long (x or y)
variables as these are supplied by the 'gdf' variable
ncpus : int
The number of cpus/processes over which to parallelize the gathering
of training data (only if ncpus is > 1). Use 'mp.cpu_count()' to determine the number of
cpus available on a machine. Defaults to 1.
return_coords : bool
If True, then the training data will contain two extra columns 'x_coord' and
'y_coord' corresponding to the x,y coordinate of each sample. This variable can
be useful for handling spatial autocorrelation between samples later in the ML workflow.
custom_func : function, optional
A custom function for generating feature layers. If this parameter
is set, all other options (excluding 'zonal_stats'), will be ignored.
The result of the 'custom_func' must be a single xarray dataset
containing 2D coordinates (i.e x, y - no time dimension). The custom function
has access to the datacube dataset extracted using the 'dc_query' params. To load
other datasets, you can use the 'like=ds.geobox' parameter in dc.load
field : str
Name of the column in the gdf that contains the class labels
calc_indices: list, optional
If not using a custom func, then this parameter provides a method for
calculating a number of remote sensing indices (e.g. `['NDWI', 'NDVI']`).
reduce_func : string, optional
Function to reduce the data from multiple time steps to
a single timestep. Options are 'mean', 'median', 'std',
'max', 'min', 'geomedian'. Ignored if 'custom_func' is provided.
drop : boolean, optional ,
If this variable is set to True, and 'calc_indices' are supplied, the
spectral bands will be dropped from the dataset leaving only the
band indices as data variables in the dataset. Default is True.
zonal_stats : string, optional
An optional string giving the names of zonal statistics to calculate
for each polygon. Default is None (all pixel values are returned). Supported
values are 'mean', 'median', 'max', 'min', and 'std'. Will work in
        conjunction with a 'custom_func'.
clean : bool
Whether or not to remove missing values in the training dataset. If True,
training labels with any NaNs or Infs in the feature layers will be dropped
from the dataset.
    fail_threshold : float, default 0.02
Silent read fails on S3 during multiprocessing can result in some rows of the
returned data containing all NaN values. Set the 'fail_threshold' fraction to
specify a minimum number of acceptable fails e.g. setting 'fail_threshold' to 0.05
means 5 % no-data in the returned dataset is acceptable. Above this fraction the
function will attempt to recollect the samples that have failed.
A sample is defined as having failed if it returns > 50 % NaN values.
max_retries: int, default 3
Number of times to retry collecting a sample. This number is invoked if the 'fail_threshold' is
not reached.
Returns
--------
Two lists, a list of numpy.arrays containing classes and extracted data for
each pixel or polygon, and another containing the data variable names.
"""
# check the dtype of the class field
if (gdf[field].dtype != np.int):
raise ValueError(
'The "field" column of the input vector must contain integer dtypes'
)
# set up some print statements
if custom_func is not None:
print("Reducing data using user supplied custom function")
if calc_indices is not None and custom_func is None:
print("Calculating indices: " + str(calc_indices))
if reduce_func is not None and custom_func is None:
print("Reducing data using: " + reduce_func)
if zonal_stats is not None:
print("Taking zonal statistic: " + zonal_stats)
#add unique id to gdf to help later with indexing failed rows
    #during multiprocessing
gdf['id'] = range(0, len(gdf))
if ncpus == 1:
# progress indicator
print('Collecting training data in serial mode')
i = 0
# list to store results
results = []
column_names = []
# loop through polys and extract training data
for index, row in gdf.iterrows():
print(" Feature {:04}/{:04}\r".format(i + 1, len(gdf)), end='')
_get_training_data_for_shp(gdf, index, row, results, column_names,
products, dc_query, return_coords,
custom_func, field, calc_indices,
reduce_func, drop, zonal_stats)
i += 1
else:
print('Collecting training data in parallel mode')
column_names, results = _get_training_data_parallel(
gdf=gdf,
products=products,
dc_query=dc_query,
ncpus=ncpus,
return_coords=return_coords,
custom_func=custom_func,
field=field,
calc_indices=calc_indices,
reduce_func=reduce_func,
drop=drop,
zonal_stats=zonal_stats)
        # column names are appended during each iteration
# but they are identical, grab only the first instance
column_names = column_names[0]
# Stack the extracted training data for each feature into a single array
model_input = np.vstack(results)
# this code block iteratively retries failed rows
# up to max_retries or until fail_threshold is
# reached - whichever occurs first
if ncpus > 1:
i = 1
while (i <= max_retries):
# Count number of fails
num = np.count_nonzero(np.isnan(model_input), axis=1) > int(
model_input.shape[1] * 0.5)
num = num.sum()
fail_rate = num / len(gdf)
print('Percentage of possible fails after run ' + str(i) + ' = ' +
str(round(fail_rate * 100, 2)) + ' %')
if fail_rate > fail_threshold:
print('Recollecting samples that failed')
#find rows where NaNs account for more than half the values
nans = model_input[np.count_nonzero(
np.isnan(model_input), axis=1) > int(model_input.shape[1] *
0.5)]
#remove nan rows from model_input object
model_input = model_input[np.count_nonzero(
                    np.isnan(model_input), axis=1) <= int(model_input.shape[1] * 0.5)]
                # assumed completion of a truncated line: keep only rows with < 50 % NaNs
from collections import defaultdict
import cPickle as pickle
import os
import time
import numpy as np
import tensorflow as tf
from sym_net import SymNet
from util import *
# Data
tf.app.flags.DEFINE_string('train_txt_fp', '', 'Training dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('valid_txt_fp', '', 'Eval dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('test_txt_fp', '', 'Test dataset txt file with a list of pickled song files')
tf.app.flags.DEFINE_string('sym_rnn_pretrain_model_ckpt_fp', '', 'File path to model checkpoint with only sym weights')
tf.app.flags.DEFINE_string('model_ckpt_fp', '', 'File path to model checkpoint if resuming or eval')
# Features
tf.app.flags.DEFINE_string('sym_in_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_string('sym_out_type', 'onehot', 'Either \'onehot\' or \'bagofarrows\'')
tf.app.flags.DEFINE_integer('sym_narrows', 4, 'Number of arrows in data')
tf.app.flags.DEFINE_integer('sym_narrowclasses', 4, 'Number of arrow classes in data')
tf.app.flags.DEFINE_integer('sym_embedding_size', 32, '')
tf.app.flags.DEFINE_bool('audio_z_score', False, 'If true, train and test on z-score of validation data')
tf.app.flags.DEFINE_integer('audio_deviation_max', 0, '')
tf.app.flags.DEFINE_integer('audio_context_radius', -1, 'Past and future context per training example')
tf.app.flags.DEFINE_integer('audio_nbands', 0, 'Number of bands per frame')
tf.app.flags.DEFINE_integer('audio_nchannels', 0, 'Number of channels per frame')
tf.app.flags.DEFINE_bool('feat_meas_phase', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_meas_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_cos', False, '')
tf.app.flags.DEFINE_bool('feat_beat_phase_sin', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff', False, '')
tf.app.flags.DEFINE_bool('feat_beat_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_beat_abs', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff', False, '')
tf.app.flags.DEFINE_bool('feat_time_diff_next', False, '')
tf.app.flags.DEFINE_bool('feat_time_abs', False, '')
tf.app.flags.DEFINE_bool('feat_prog_diff', False, '')
tf.app.flags.DEFINE_bool('feat_prog_abs', False, '')
tf.app.flags.DEFINE_bool('feat_diff_feet', False, '')
tf.app.flags.DEFINE_bool('feat_diff_aps', False, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_beat_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_nquant', 0, '')
tf.app.flags.DEFINE_integer('feat_meas_phase_max_nwraps', 0, '')
tf.app.flags.DEFINE_string('feat_diff_feet_to_id_fp', '', '')
tf.app.flags.DEFINE_string('feat_diff_coarse_to_id_fp', '', '')
tf.app.flags.DEFINE_bool('feat_diff_dipstick', False, '')
tf.app.flags.DEFINE_string('feat_freetext_to_id_fp', '', '')
tf.app.flags.DEFINE_integer('feat_bucket_beat_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_beat_diff_max', None, '')
tf.app.flags.DEFINE_integer('feat_bucket_time_diff_n', None, '')
tf.app.flags.DEFINE_float('feat_bucket_time_diff_max', None, '')
# Network params
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size for training')
tf.app.flags.DEFINE_integer('nunroll', 1, '')
tf.app.flags.DEFINE_string('cnn_filter_shapes', '', 'CSV 3-tuples of filter shapes (time, freq, n)')
tf.app.flags.DEFINE_string('cnn_pool', '', 'CSV 2-tuples of pool amounts (time, freq)')
tf.app.flags.DEFINE_integer('cnn_dim_reduction_size', -1, '')
tf.app.flags.DEFINE_float('cnn_dim_reduction_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('cnn_dim_reduction_nonlin', '', '')
tf.app.flags.DEFINE_string('rnn_cell_type', 'lstm', '')
tf.app.flags.DEFINE_integer('rnn_size', 0, '')
tf.app.flags.DEFINE_integer('rnn_nlayers', 0, '')
tf.app.flags.DEFINE_float('rnn_keep_prob', 1.0, '')
tf.app.flags.DEFINE_string('dnn_sizes', '', 'CSV sizes for dense layers')
tf.app.flags.DEFINE_float('dnn_keep_prob', 1.0, '')
# Training params
tf.app.flags.DEFINE_float('grad_clip', 0.0, 'Clip gradients to this value if greater than 0')
tf.app.flags.DEFINE_string('opt', 'sgd', 'One of \'sgd\'')
tf.app.flags.DEFINE_float('lr', 1.0, 'Learning rate')
tf.app.flags.DEFINE_float('lr_decay_rate', 1.0, 'Multiply learning rate by this value every epoch')
tf.app.flags.DEFINE_integer('lr_decay_delay', 0, '')
tf.app.flags.DEFINE_integer('nbatches_per_ckpt', 100, 'Save model weights every N batches')
tf.app.flags.DEFINE_integer('nbatches_per_eval', 10000, 'Evaluate model every N batches')
tf.app.flags.DEFINE_integer('nepochs', 0, 'Number of training epochs, negative means train continuously')
tf.app.flags.DEFINE_string('experiment_dir', '', 'Directory for temporary training files and model weights')
# Eval params
# Generate params
tf.app.flags.DEFINE_string('generate_fp', '', '')
tf.app.flags.DEFINE_string('generate_vocab_fp', '', '')
FLAGS = tf.app.flags.FLAGS
dtype = tf.float32
def main(_):
assert FLAGS.experiment_dir
do_train = FLAGS.nepochs != 0 and bool(FLAGS.train_txt_fp)
do_valid = bool(FLAGS.valid_txt_fp)
do_train_eval = do_train and do_valid
do_eval = bool(FLAGS.test_txt_fp)
do_generate = bool(FLAGS.generate_fp)
# Load data
print('Loading data')
train_data, valid_data, test_data = open_dataset_fps(FLAGS.train_txt_fp, FLAGS.valid_txt_fp, FLAGS.test_txt_fp)
# Calculate validation metrics
if FLAGS.audio_z_score:
z_score_fp = os.path.join(FLAGS.experiment_dir, 'valid_mean_std.pkl')
if do_valid and not os.path.exists(z_score_fp):
print('Calculating validation metrics')
mean_per_band, std_per_band = calc_mean_std_per_band(valid_data)
with open(z_score_fp, 'wb') as f:
pickle.dump((mean_per_band, std_per_band), f)
else:
print('Loading validation metrics')
with open(z_score_fp, 'rb') as f:
mean_per_band, std_per_band = pickle.load(f)
# Sanitize data
for data in [train_data, valid_data, test_data]:
apply_z_norm(data, mean_per_band, std_per_band)
# Flatten the data into chart references for easier iteration
print('Flattening datasets into charts')
charts_train = flatten_dataset_to_charts(train_data)
charts_valid = flatten_dataset_to_charts(valid_data)
charts_test = flatten_dataset_to_charts(test_data)
# Filter charts that are too short
charts_train_len = len(charts_train)
charts_train = filter(lambda x: x.get_nannotations() >= FLAGS.nunroll, charts_train)
if len(charts_train) != charts_train_len:
print('{} charts too small for training'.format(charts_train_len - len(charts_train)))
print('Train set: {} charts, valid set: {} charts, test set: {} charts'.format(len(charts_train), len(charts_valid), len(charts_test)))
# Load ID maps
diff_feet_to_id = None
if FLAGS.feat_diff_feet_to_id_fp:
diff_feet_to_id = load_id_dict(FLAGS.feat_diff_feet_to_id_fp)
diff_coarse_to_id = None
if FLAGS.feat_diff_coarse_to_id_fp:
diff_coarse_to_id = load_id_dict(FLAGS.feat_diff_coarse_to_id_fp)
freetext_to_id = None
if FLAGS.feat_freetext_to_id_fp:
freetext_to_id = load_id_dict(FLAGS.feat_freetext_to_id_fp)
# Create feature config
feats_config = {
'meas_phase': FLAGS.feat_meas_phase,
'meas_phase_cos': FLAGS.feat_meas_phase_cos,
'meas_phase_sin': FLAGS.feat_meas_phase_sin,
'beat_phase': FLAGS.feat_beat_phase,
'beat_phase_cos': FLAGS.feat_beat_phase_cos,
'beat_phase_sin': FLAGS.feat_beat_phase_sin,
'beat_diff': FLAGS.feat_beat_diff,
'beat_diff_next': FLAGS.feat_beat_diff_next,
'beat_abs': FLAGS.feat_beat_abs,
'time_diff': FLAGS.feat_time_diff,
'time_diff_next': FLAGS.feat_time_diff_next,
'time_abs': FLAGS.feat_time_abs,
'prog_diff': FLAGS.feat_prog_diff,
'prog_abs': FLAGS.feat_prog_abs,
'diff_feet': FLAGS.feat_diff_feet,
'diff_aps': FLAGS.feat_diff_aps,
'beat_phase_nquant': FLAGS.feat_beat_phase_nquant,
'beat_phase_max_nwraps': FLAGS.feat_beat_phase_max_nwraps,
'meas_phase_nquant': FLAGS.feat_meas_phase_nquant,
'meas_phase_max_nwraps': FLAGS.feat_meas_phase_max_nwraps,
'diff_feet_to_id': diff_feet_to_id,
'diff_coarse_to_id': diff_coarse_to_id,
'freetext_to_id': freetext_to_id,
'bucket_beat_diff_n': FLAGS.feat_bucket_beat_diff_n,
'bucket_time_diff_n': FLAGS.feat_bucket_time_diff_n
}
nfeats = 0
for feat in feats_config.values():
if feat is None:
continue
if isinstance(feat, dict):
nfeats += max(feat.values()) + 1
else:
nfeats += int(feat)
nfeats += 1 if FLAGS.feat_beat_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_meas_phase_max_nwraps > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_beat_diff_n > 0 else 0
nfeats += 1 if FLAGS.feat_bucket_time_diff_n > 0 else 0
feats_config['diff_dipstick'] = FLAGS.feat_diff_dipstick
feats_config['audio_time_context_radius'] = FLAGS.audio_context_radius
feats_config['audio_deviation_max'] = FLAGS.audio_deviation_max
feats_config['bucket_beat_diff_max'] = FLAGS.feat_bucket_beat_diff_max
feats_config['bucket_time_diff_max'] = FLAGS.feat_bucket_time_diff_max
feats_config_eval = dict(feats_config)
feats_config_eval['audio_deviation_max'] = 0
print('Feature configuration (nfeats={}): {}'.format(nfeats, feats_config))
# Create model config
rnn_proj_init = tf.constant_initializer(0.0, dtype=dtype) if FLAGS.sym_rnn_pretrain_model_ckpt_fp else tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype)
model_config = {
'nunroll': FLAGS.nunroll,
'sym_in_type': FLAGS.sym_in_type,
'sym_embedding_size': FLAGS.sym_embedding_size,
'sym_out_type': FLAGS.sym_out_type,
'sym_narrows': FLAGS.sym_narrows,
'sym_narrowclasses': FLAGS.sym_narrowclasses,
'other_nfeats': nfeats,
'audio_context_radius': FLAGS.audio_context_radius,
'audio_nbands': FLAGS.audio_nbands,
'audio_nchannels': FLAGS.audio_nchannels,
'cnn_filter_shapes': stride_csv_arg_list(FLAGS.cnn_filter_shapes, 3, int),
'cnn_init': tf.uniform_unit_scaling_initializer(factor=1.43, dtype=dtype),
'cnn_pool': stride_csv_arg_list(FLAGS.cnn_pool, 2, int),
'cnn_dim_reduction_size': FLAGS.cnn_dim_reduction_size,
'cnn_dim_reduction_init': tf.uniform_unit_scaling_initializer(factor=1.0, dtype=dtype),
'cnn_dim_reduction_nonlin': FLAGS.cnn_dim_reduction_nonlin,
'cnn_dim_reduction_keep_prob': FLAGS.cnn_dim_reduction_keep_prob,
'rnn_proj_init': rnn_proj_init,
'rnn_cell_type': FLAGS.rnn_cell_type,
'rnn_size': FLAGS.rnn_size,
'rnn_nlayers': FLAGS.rnn_nlayers,
'rnn_init': tf.random_uniform_initializer(-5e-2, 5e-2, dtype=dtype),
'nunroll': FLAGS.nunroll,
'rnn_keep_prob': FLAGS.rnn_keep_prob,
'dnn_sizes': stride_csv_arg_list(FLAGS.dnn_sizes, 1, int),
'dnn_init': tf.uniform_unit_scaling_initializer(factor=1.15, dtype=dtype),
'dnn_keep_prob': FLAGS.dnn_keep_prob,
'grad_clip': FLAGS.grad_clip,
'opt': FLAGS.opt,
}
print('Model configuration: {}'.format(model_config))
with tf.Graph().as_default(), tf.Session() as sess:
if do_train:
print('Creating train model')
with tf.variable_scope('model_ss', reuse=None):
model_train = SymNet(mode='train', batch_size=FLAGS.batch_size, **model_config)
if do_train_eval or do_eval:
print('Creating eval model')
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
if FLAGS.rnn_size > 0 and FLAGS.rnn_nlayers > 0:
eval_batch_size = 1
model_eval = SymNet(mode='eval', batch_size=eval_batch_size, **model_config)
model_early_stop_xentropy_avg = tf.train.Saver(tf.global_variables(), max_to_keep=None)
model_early_stop_accuracy = tf.train.Saver(tf.global_variables(), max_to_keep=None)
if do_generate:
print('Creating generation model')
with tf.variable_scope('model_ss', reuse=do_train):
eval_batch_size = FLAGS.batch_size
model_gen = SymNet(mode='gen', batch_size=1, **model_config)
# Restore or init model
model_saver = tf.train.Saver(tf.global_variables())
if FLAGS.model_ckpt_fp:
print('Restoring model weights from {}'.format(FLAGS.model_ckpt_fp))
model_saver.restore(sess, FLAGS.model_ckpt_fp)
else:
print('Initializing model weights from scratch')
sess.run(tf.global_variables_initializer())
# Restore or init sym weights
if FLAGS.sym_rnn_pretrain_model_ckpt_fp:
print('Restoring pretrained weights from {}'.format(FLAGS.sym_rnn_pretrain_model_ckpt_fp))
var_list_old = filter(lambda x: 'nosym' not in x.name and 'cnn' not in x.name, tf.global_variables())
pretrain_saver = tf.train.Saver(var_list_old)
pretrain_saver.restore(sess, FLAGS.sym_rnn_pretrain_model_ckpt_fp)
# Create summaries
if do_train:
summary_writer = tf.summary.FileWriter(FLAGS.experiment_dir, sess.graph)
epoch_mean_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_mean_xentropy')
epoch_mean_time = tf.placeholder(tf.float32, shape=[], name='epoch_mean_time')
epoch_var_xentropy = tf.placeholder(tf.float32, shape=[], name='epoch_var_xentropy')
epoch_var_time = tf.placeholder(tf.float32, shape=[], name='epoch_var_time')
epoch_time_total = tf.placeholder(tf.float32, shape=[], name='epoch_time_total')
epoch_summaries = tf.summary.merge([
tf.summary.scalar('epoch_mean_xentropy', epoch_mean_xentropy),
tf.summary.scalar('epoch_mean_time', epoch_mean_time),
tf.summary.scalar('epoch_var_xentropy', epoch_var_xentropy),
tf.summary.scalar('epoch_var_time', epoch_var_time),
tf.summary.scalar('epoch_time_total', epoch_time_total)
])
eval_metric_names = ['xentropy_avg', 'accuracy']
eval_metrics = {}
eval_summaries = []
for eval_metric_name in eval_metric_names:
name_mean = 'eval_mean_{}'.format(eval_metric_name)
name_var = 'eval_var_{}'.format(eval_metric_name)
ph_mean = tf.placeholder(tf.float32, shape=[], name=name_mean)
ph_var = tf.placeholder(tf.float32, shape=[], name=name_var)
summary_mean = tf.summary.scalar(name_mean, ph_mean)
summary_var = tf.summary.scalar(name_var, ph_var)
eval_summaries.append(tf.summary.merge([summary_mean, summary_var]))
eval_metrics[eval_metric_name] = (ph_mean, ph_var)
eval_time = tf.placeholder(tf.float32, shape=[], name='eval_time')
eval_time_summary = tf.summary.scalar('eval_time', eval_time)
eval_summaries = tf.summary.merge([eval_time_summary] + eval_summaries)
# Calculate epoch stuff
train_nexamples = sum([chart.get_nannotations() for chart in charts_train])
examples_per_batch = FLAGS.batch_size
examples_per_batch *= model_train.out_nunroll
batches_per_epoch = train_nexamples // examples_per_batch
nbatches = FLAGS.nepochs * batches_per_epoch
print('{} frames in data, {} batches per epoch, {} batches total'.format(train_nexamples, batches_per_epoch, nbatches))
# Init epoch
lr_summary = model_train.assign_lr(sess, FLAGS.lr)
summary_writer.add_summary(lr_summary, 0)
epoch_xentropies = []
epoch_times = []
batch_num = 0
eval_best_xentropy_avg = float('inf')
eval_best_accuracy = float('-inf')
while FLAGS.nepochs < 0 or batch_num < nbatches:
batch_time_start = time.time()
syms, feats_other, feats_audio, targets, target_weights = model_train.prepare_train_batch(charts_train, **feats_config)
feed_dict = {
model_train.syms: syms,
model_train.feats_other: feats_other,
model_train.feats_audio: feats_audio,
model_train.targets: targets,
model_train.target_weights: target_weights
}
batch_xentropy, _ = sess.run([model_train.avg_neg_log_lhood, model_train.train_op], feed_dict=feed_dict)
epoch_xentropies.append(batch_xentropy)
epoch_times.append(time.time() - batch_time_start)
batch_num += 1
if batch_num % batches_per_epoch == 0:
epoch_num = batch_num // batches_per_epoch
print('Completed epoch {}'.format(epoch_num))
lr_decay = FLAGS.lr_decay_rate ** max(epoch_num - FLAGS.lr_decay_delay, 0)
lr_summary = model_train.assign_lr(sess, FLAGS.lr * lr_decay)
summary_writer.add_summary(lr_summary, batch_num)
epoch_xentropy = np.mean(epoch_xentropies)
print('Epoch mean cross-entropy (nats) {}'.format(epoch_xentropy))
epoch_summary = sess.run(epoch_summaries, feed_dict={epoch_mean_xentropy: epoch_xentropy, epoch_mean_time: np.mean(epoch_times), epoch_var_xentropy: np.var(epoch_xentropies), epoch_var_time: np.var(epoch_times), epoch_time_total: np.sum(epoch_times)})
summary_writer.add_summary(epoch_summary, batch_num)
epoch_xentropies = []
epoch_times = []
if batch_num % FLAGS.nbatches_per_ckpt == 0:
print('Saving model weights...')
ckpt_fp = os.path.join(FLAGS.experiment_dir, 'onset_net_train')
model_saver.save(sess, ckpt_fp, global_step=tf.contrib.framework.get_or_create_global_step())
print('Done saving!')
if do_train_eval and batch_num % FLAGS.nbatches_per_eval == 0:
print('Evaluating...')
eval_start_time = time.time()
metrics = defaultdict(list)
for eval_chart in charts_valid:
if model_eval.do_rnn:
state = sess.run(model_eval.initial_state)
neg_log_prob_sum = 0.0
correct_predictions_sum = 0.0
weight_sum = 0.0
for syms, syms_in, feats_other, feats_audio, targets, target_weights in model_eval.eval_iter(eval_chart, **feats_config_eval):
feed_dict = {
model_eval.syms: syms_in,
model_eval.feats_other: feats_other,
model_eval.feats_audio: feats_audio,
model_eval.targets: targets,
model_eval.target_weights: target_weights
}
if model_eval.do_rnn:
feed_dict[model_eval.initial_state] = state
xentropies, correct_predictions, state = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions, model_eval.final_state], feed_dict=feed_dict)
else:
xentropies, correct_predictions = sess.run([model_eval.neg_log_lhoods, model_eval.correct_predictions], feed_dict=feed_dict)
                        neg_log_prob_sum += np.sum(xentropies)  # assumed completion of a truncated line
import configparser
import random
import glob
from enum import IntEnum
from typing import Tuple, Dict, Optional
import gym
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
from gym import spaces
from gym.utils import seeding
from grid_generators.random_maze import random_maze
from grid_generators.random_shape_maze import random_shape_maze
from grid_generators.random_start_goal import random_start_goal, random_starts_goals, random_starts_goals_in_subsquare
from rendering import fill_coords, point_in_rect, highlight_img, downsample
from matplotlib.patches import Circle
from copy import deepcopy
import torch
class WorldObj: # not used yet
"""
Base class for grid world objects
"""
def __init__(self):
self.pos = None
self._observable = True
@property
def observable(self):
return self._observable
def encode(self) -> Tuple[int, ...]:
"""Encode the description of this object"""
raise NotImplementedError
def on_entering(self, agent) -> ():
"""Action to perform when an agent enter this object"""
raise NotImplementedError
def on_leaving(self, agent) -> ():
"""Action to perform when an agent exit this object"""
raise NotImplementedError
def can_overlap(self) -> bool:
"""Can an agent overlap this object?"""
return True
def render(self, r) -> ():
"""Draw this object with the given renderer"""
raise NotImplementedError
class Grid: # not used yet
"""
Base class for grids and operations on it (not used yet)
"""
# Type hints
_obj_2_idx: Dict[Optional[WorldObj], int]
# Static cache of pre-rendered tiles
tile_cache = {}
class EncodingError(Exception):
"""Exception raised for missing entry in _obj_2_idx"""
pass
def __init__(self, width: int, height: int):
"""Create an empty Grid"""
self.width = width
self.height = height
self.grid = np.empty(shape=(width, height), dtype=WorldObj)
self._idx_2_obj = {v: k for k, v in self._obj_2_idx.items()}
@classmethod
def from_array(cls, array: np.ndarray):
(width, height) = array.shape
out = cls(width, height)
out.grid = array
return out
@property
def obj_2_idx(self):
return self._obj_2_idx
def __contains__(self, item):
return item in self.grid
def __eq__(self, other):
grid1 = self.encode()
grid2 = other.encode()
return np.array_equal(grid1, grid2)
def __getitem__(self, item):
out = self.grid.__getitem__(item)
if isinstance(out, WorldObj):
return out
else:
# slice
return Grid.from_array(out)
def __setitem__(self, key, value):
if isinstance(value, Grid):
self.grid.__setitem__(key, value.grid)
else:
self.grid.__setitem__(key, value)
def set(self, i, j, v):
"""Set an element of the grid"""
assert 0 <= i < self.width, "i index out of bounds"
assert 0 <= j < self.height, "j index out of bounds"
self.grid[i, j] = v
def get(self, i, j):
"""Get an element of the grid"""
assert 0 <= i < self.width, "i index out of bounds"
assert 0 <= j < self.height, "j index out of bounds"
return self.grid[i, j]
def slice(self, top_x, top_y, width, height):
"""Get a subset of the grid"""
assert 0 <= top_x < self.width
assert 0 <= top_x + width < self.width
        assert 0 <= top_y + height < self.height
assert 0 <= top_y < self.height
return Grid.from_array(self.grid[top_x:(top_x + width), top_y:(top_y + height)])
@classmethod
def render_tile(cls, obj: WorldObj, highlight=False, tile_size=32, subdivs=3):
"""
Render a tile and cache the result
"""
# Hash map lookup key for the cache
key = (highlight, tile_size)
key = obj.encode() + key if obj else key
if key in cls.tile_cache:
return cls.tile_cache[key]
img = np.zeros(shape=(tile_size * subdivs, tile_size * subdivs, 3), dtype=np.uint8)
# Draw the grid lines (top and left edges)
fill_coords(img, point_in_rect(0, 0.031, 0, 1), (100, 100, 100))
fill_coords(img, point_in_rect(0, 1, 0, 0.031), (100, 100, 100))
if obj is not None:
obj.render(img)
# Highlight the cell if needed
if highlight:
highlight_img(img)
# Down-sample the image to perform super-sampling/anti-aliasing
img = downsample(img, subdivs)
# Cache the rendered tile
cls.tile_cache[key] = img
return img
def render(self, tile_size=32, highlight_mask=None):
"""
Render this grid at a given scale
:param tile_size: tile size in pixels
"""
if highlight_mask is None:
            highlight_mask = np.zeros(shape=(self.width, self.height), dtype=bool)
# Compute the total grid size
width_px = self.width * tile_size
height_px = self.height * tile_size
img = np.zeros(shape=(height_px, width_px, 3), dtype=np.uint8)
# Render the grid
for j in range(0, self.height):
for i in range(0, self.width):
cell = self.get(i, j)
tile_img = Grid.render_tile(
cell,
highlight=highlight_mask[i, j],
tile_size=tile_size
)
ymin = j * tile_size
ymax = (j + 1) * tile_size
xmin = i * tile_size
xmax = (i + 1) * tile_size
img[ymin:ymax, xmin:xmax, :] = tile_img
return img
def encode(self, vis_mask: np.ndarray = None):
"""
Produce a compact numpy encoding of the grid with tuples for each cells
:param vis_mask: numpy array of boolean as a vision mask
:return: numpy array
"""
if vis_mask is None:
vis_mask = np.ones((self.width, self.height), dtype=bool)
assert vis_mask.shape == self.grid.shape
array = np.zeros((self.width, self.height, 2), dtype="uint8") # TODO: enable variable length encoding?
for i in range(self.width):
for j in range(self.height):
if vis_mask[i, j]:
v = self.get(i, j)
if v is None:
                        try:
                            array[i, j, :] = self._obj_2_idx[None], 0
                        except KeyError:
                            raise Grid.EncodingError("Empty grid cell encoding index not specified")
                    if v is not None:
                        # unobservable objects are encoded as if the cell were empty
                        if not v.observable:
                            try:
                                array[i, j, :] = self._obj_2_idx[None], 0
                            except KeyError:
                                raise Grid.EncodingError("Empty grid cell encoding index not specified for "
                                                         "unobservable object")
                        else:
try:
array[i, j, 0] = self._obj_2_idx[v.__class__]
except KeyError:
raise Grid.EncodingError(f"Grid cell encoding index for {v.__class__} not specified")
array[i, j, :] = v.encode()
return array
@classmethod
def decode(cls, array):
"""
Decode an array grid encoding back into a grid using this grid encoding
:param array: an array grid encoded
:return: grid
"""
width, height, channels = array.shape
assert channels == 2 # TODO: enable variable length encoding?
grid = cls(width, height)
for i in range(width):
for j in range(height):
type_idx, arg = array[i, j]
# TODO : continue
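# Illustrative sketch (not part of the original file): a minimal concrete
# WorldObj showing how the abstract hooks above are meant to be filled in.
# The (2, 0) encoding, the colour and the recharge behaviour are assumptions
# for demonstration only.
class DemoChargingPad(WorldObj):
    """Hypothetical object an agent can stand on to recharge."""
    def encode(self):
        return (2, 0)  # assumed (type_idx, state) pair
    def on_entering(self, agent):
        agent.battery = GridworldAgent.BATTERY  # resolved at call time; class defined below
    def on_leaving(self, agent):
        pass  # nothing happens when the agent leaves
    def render(self, r):
        fill_coords(r, point_in_rect(0.2, 0.8, 0.2, 0.8), (0, 120, 255))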
class GridworldAgent:
BATTERY = 150
def __init__(self, agent_id, start, goal):
self.id = agent_id
# Position of the agent
self.init_pos = start
self.pos = start
# Position of its goal
self.init_goal = goal
self.goal = goal
# Battery
self.battery = GridworldAgent.BATTERY # percent
# Boolean to know if the agent is done
self.done = False
class GridWorld(gym.Env):
"""
2D Grid world environment
"""
metadata = {'render.modes': ['human']}
class LegalActions(IntEnum):
""" legal actions"""
left = 0
right = 1
up = 2
down = 3
class GridLegend(IntEnum):
# FREE = 0
OBSTACLE = 1
GCS = 2 # ground charging stations
AGENT = 3
GOAL = 4
# OUT_OF_BOUNDS = 6 # Commented out for now since it interferes with the conv-net input tensor
class UnknownAction(Exception):
"""Raised when an agent try to do an unknown action"""
pass
def __init__(self, n_agents=1, grid=np.ones((5, 5)), partial_obs=False, width=5, height=5,
col_wind=None, range_random_wind=0, probabilities=None):
self.agents = [GridworldAgent(i, None, None) for i in range(n_agents)]
self.grid = grid
self.gcs = np.where(grid == GridWorld.GridLegend.GCS) # Ground charging station
if probabilities is None and range_random_wind == 0:
probabilities = [1] # Zero noise
# Define if the agents use partial observation or global observation
# partial obs broken, don't use it
self.partial_obs = partial_obs
if self.partial_obs:
self.agent_view_width = width
self.agent_view_height = height
self.actions = GridWorld.LegalActions
self.action_space = spaces.Discrete(len(self.actions))
self.observation_space = spaces.Box(low=0, high=1,
shape=(1, len(self.GridLegend) + (n_agents - 1) * 2, *grid.shape), # (dim of encoding, dim of one observation + partial obs of all the other agents, grid width, grid height)
dtype='uint8')
self.agents_initial_pos = [agent.pos for agent in self.agents] # starting position of the agents on the grid
# Wind effects -- TODO: May need to be moved into the world object? or is it okay here?
self.np_random, _ = self.seed() # Seeding the random number generator
if col_wind is None:
            col_wind = np.zeros(len(self.grid))
self.col_wind = col_wind # Static wind (rightwards is positive)
self.range_random_wind = range_random_wind # Random (dynamic) wind added on top of static wind
self.w_range = np.arange(-self.range_random_wind, self.range_random_wind + 1)
self.probabilities = probabilities # Stochasticity implemented through noise
        assert np.isclose(sum(self.probabilities), 1.0)
self.step_count = 0
self.max_steps = 100
self.rewards = {"free": -0.04,
"obstacles": -0.75,
"goal": 10.0,
"out_of_bounds": -0.8,
"battery_depleted": -10.0}
def reset(self, reset_starts_goals=True, radius=10, reset_grid=True):
if reset_grid:
_grid = self._gen_grid(*self.grid.shape)
gcs = (random.randrange(0, self.grid.shape[0]), random.randrange(0, self.grid.shape[1]))
# following loop is potentially infinite (but then you are very unlucky)
while (_grid[gcs] # if the gcs is on one of the generated obstacle
or (not reset_starts_goals # if we will reset starts and goal no need to go further
and (any(_grid[agent.init_pos] # if any of the generated obstacles is on one of the start position
or _grid[agent.init_goal] # or one of the goal position
or abs(gcs[0] - agent.init_pos[0]) + abs(gcs[1] - agent.init_pos[1]) > 10 # or the generated gcs is out of range from the starting position
or abs(gcs[0] - agent.init_goal[0]) + abs(gcs[1] - agent.init_goal[1]) > 10 # or the goal is out of range from the gcs
for agent in self.agents)))):
# generate new obstacles and gcs
_grid = self._gen_grid(*self.grid.shape)
gcs = (random.randrange(0, self.grid.shape[0]), random.randrange(0, self.grid.shape[1]))
# if not reset_starts_goals:
# starts, goals = (agent.init_pos for agent in self.agents), (agent.init_goal for agent in self.agents)
# tries = 0 # just to protect from the very unlikely case of an infinite loop
# while tries < 100 and any(_grid[start] for start in starts) or any(_grid[goal] for goal in goals):
# # if any of the generated obstacles is on one of the goal or start positions :
# # generate new obstacles
# _grid = self._gen_grid(*self.grid.shape)
# if tries == 100:
# _grid = np.zeros(self.grid.shape)
_grid[gcs] = self.GridLegend.GCS
self.grid = _grid
self.gcs = gcs
print(f"New grid generated, gcs is in {gcs}")
if reset_starts_goals:
starts, goals = self._gen_starts_goals_positions(radius)
# following loop is potentially infinite
while (any(self.grid[start] # if any of the start position is on an obstacle
or abs(self.gcs[0] - start[0]) + abs(self.gcs[1] - start[1]) > 10 # or out of range of the gcs
for start in starts)
or any(self.grid[goal] # if any of the goal position is on an obstacle
or abs(self.gcs[0] - goal[0]) + abs(self.gcs[1] - goal[1]) > 10 # or out of range of the gcs
for goal in goals)):
# generate new positions
starts, goals = self._gen_starts_goals_positions(radius)
# while (any(self.grid[start] for start in starts) or any(self.grid[goal] for goal in goals)):
# # if any of the generated position is on one of the obstacles : generate new positions
# starts, goals = self._gen_starts_goals_positions(radius)
print(f"New starts are {starts} and new goals are {goals}")
for i in range(len(self.agents)):
agent = self.agents[i]
agent.init_pos = starts[i]
agent.init_goal = goals[i]
for agent in self.agents:
agent.pos = agent.init_pos
agent.goal = agent.init_goal
agent.done = False
agent.battery = GridworldAgent.BATTERY
# self.render() # show the initial arrangement of the grid
# return first observation
return self.gen_obs()
def _gen_grid(self, width, height):
"""Generate a new grid"""
# _grid = random_shape_maze(width, height, max_shapes=5, max_size=3, allow_overlap=False)
# _grid = np.genfromtxt(random.choice(glob.glob("sample_grid/*.csv")), delimiter=',')
_grid = np.zeros(self.grid.shape)
return _grid
def _gen_starts_goals_positions(self, radius=10):
"""Generate new starts and goal positions for the agents of the environment"""
# # first row / last row ?
# start_bounds = ((0, 1), (0, self.grid.shape[0]))
# goal_bounds = ((self.grid.shape[0] - 1, self.grid.shape[0]), (0, self.grid.shape[0]))
# starts, goals = random_starts_goals(n=len(self.agents), width=self.grid.shape[0],
# start_bounds=start_bounds, goal_bounds=goal_bounds)
# whole grid ?
start_bounds = ((0, self.grid.shape[1]), (0, self.grid.shape[0]))
goal_bounds = ((0, self.grid.shape[1]), (0, self.grid.shape[0]))
starts, goals = random_starts_goals(n=len(self.agents), width=self.grid.shape[0],
start_bounds=start_bounds, goal_bounds=goal_bounds)
# # within a sub_grid ?
# starts, goals = random_starts_goals_in_subsquare(n=len(self.agents), width=self.grid.shape[0], sub_width=radius)
# # goal around gcs ?
# if radius >= self.grid.shape[0]:
# start_bounds = ((0, self.grid.shape[1]), (0, self.grid.shape[0]))
# goal_bounds = ((0, self.grid.shape[1]), (0, self.grid.shape[0]))
# else:
# x,y = self.gcs[0], self.gcs[1]
# top_x = x-radius+1 if x-radius+1>=0 else 0
# top_y = y-radius+1 if y-radius+1>=0 else 0
# bottom_x = x+radius if x+radius-1<self.grid.shape[0] else self.grid.shape[0]
# bottom_y = y+radius if y+radius-1<self.grid.shape[1] else self.grid.shape[1]
# start_bounds = ((0, self.grid.shape[1]), (0, self.grid.shape[0]))
# goal_bounds = ((top_x, bottom_x), (top_y, bottom_y))
# starts, goals = random_starts_goals(n=len(self.agents), width=self.grid.shape[0],
# start_bounds=start_bounds, goal_bounds=goal_bounds)
return starts, goals
def trans_function(self, state, action, noise):
"""Creating transition function based on environmental factors
For now, only wind considered -> static + random (pre-defined probabilities that the agent can
figure out through experience)"""
n, m = state
if self.col_wind[n] != 0:
wind = self.col_wind[n] + noise
else:
wind = 0 # Irrespective of random noise
# Go UP
if action == self.actions.up:
(n, m) = (n - 1, m + wind)
# Go DOWN
elif action == self.actions.down:
(n, m) = (n + 1, m + wind)
# Go LEFT
elif action == self.actions.left:
(n, m) = (n, m - 1 + wind)
# Go RIGHT
elif action == self.actions.right:
(n, m) = (n, m + 1 + wind)
return n, m
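    # Worked example (illustrative): with col_wind[n] == 1 and noise == 0,
    # actions.right from (n, m) lands on (n, m + 2) -- one column for the
    # action plus one for the static wind.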
def _reward_agent(self, i):
""" compute the reward for the i-th agent in the current state"""
illegal = False
done = False
agent = self.agents[i]
(n, m) = agent.pos
# check for out of bounds
if not (0 <= n < self.grid.shape[0] and 0 <= m < self.grid.shape[1]):
reward = self.rewards["out_of_bounds"]
# done = True
illegal = True
# check for collisions with obstacles (statics and dynamics)
# for now it only checks for obstacles and others agents but it could be generalized with
# the definition of Cell objects : check if cell is empty or not
elif (self.grid[n, m] == self.GridLegend.OBSTACLE # obstacles
or (n, m) in [self.agents[j].pos for j in range(len(self.agents)) if j != i]): # other agents
reward = self.rewards["obstacles"]
illegal = True
# done = True
# check if agent reached its goal
elif (n, m) == agent.goal:
reward = self.rewards["goal"]
done = True
# penalise the agent for extra moves
else:
reward = self.rewards["free"]
if agent.battery <= 0:
reward = self.rewards["battery_depleted"]
done = True
return reward, illegal, done
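    # Worked example (illustrative): an agent stepping outside the grid gets
    # rewards["out_of_bounds"] (-0.8) and its move is flagged illegal; a depleted
    # battery overrides any other outcome with -10.0 and ends that agent's episode.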
def step(self, actions):
self.step_count += 1
assert len(actions) == len(self.agents), "number of actions must be equal to number of agents"
# get a random permutation ( agents actions/reward must be order-independent)
# random_order = np.random.permutation(len(self.agents))
rewards = np.zeros(len(actions))
# compute the moves
moves = [agent.pos for agent in self.agents]
for i in range(len(self.agents)):
# agent mission is already done
if self.agents[i].done:
continue
action = actions[i]
agent = self.agents[i]
# agent current pos
(n, m) = agent.pos # n is the row number, m is the column number
# Adding random noise (wind) to the action
noise = self.np_random.choice(self.w_range, p=self.probabilities)
# Generate move
(n_, m_) = self.trans_function((n, m), action, noise)
# Store backup of move for each agent (i)
moves[i] = (n_, m_)
# compute rewards and apply moves if they are legal:
# remember old positions
old_pos = [agent.pos for agent in self.agents]
# apply the moves (even if illegal)
for i in range(len(self.agents)):
# agent mission is already done
if self.agents[i].done:
continue
self.agents[i].pos = moves[i]
self.agents[i].battery -= 10
# compute rewards and illegal assertions and cancel move if illegal
illegals = [False for agent in self.agents]
done = [agent.done for agent in self.agents]
for i in range(len(self.agents)):
# agent mission is already done
if self.agents[i].done:
continue
# compute rewards and illegal assertions
rewards[i], illegals[i], done[i] = self._reward_agent(i)
# recursively solve conflicts
# now stop computing reward after first canceled move
# we could also apply reward after the cancelling :
        # it would keep the good reward when there is no conflict and apply the "last" bad reward obtained during conflict resolution,
        # even if the conflict happened after a cancelled move and a return to the old position (case when another agent took the old position)
while any(illegals): # recursively solve conflicts
# cancel moves if illegal
for i in range(len(self.agents)):
if illegals[i]:
self.agents[i].pos = old_pos[i]
illegals[i] = False
# compute new illegal assertions in case of newly created conflict because of the return to an old position
for i in range(len(self.agents)):
# agent mission is already done or if its move was canceled (remove second condition to apply above suggested idea)
if self.agents[i].done or self.agents[i].pos == old_pos[i]:
continue
# compute rewards and illegal assertions
rewards[i], illegals[i], done[i] = self._reward_agent(i)
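        # Illustrative scenario: if agents A and B both move into the same cell,
        # both moves are flagged illegal and rolled back; the loop then re-checks,
        # because A returning to its old position may newly collide with a third
        # agent that moved onto that cell.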
# handle specific cells events and apply done statements after
for i in range(len(self.agents)):
# agent mission is already done
if self.agents[i].done:
continue
            # if the agent reaches a GCS, charge up the battery
if self.grid[self.agents[i].pos] == self.GridLegend.GCS:
self.agents[i].battery = GridworldAgent.BATTERY # TODO : look into slow charging (need to introduce a new "stay idle" action)
self.agents[i].done = done[i]
# game over if all the agents are done
done = [agent.done for agent in self.agents]
# compute observation
obs = self.gen_obs()
return obs, rewards, done, {}
def gen_obs(self, tensor=1):
"""Generate the observation"""
return [self._gen_obs_agent(agent, tensor) for agent in self.agents]
def _gen_obs_agent(self, agent, tensor=1):
"""Generate the agent's view"""
if self.partial_obs:
x, y = agent.pos
w, h = self.agent_view_width, self.agent_view_height
sub_grid = np.full((w, h), self.GridLegend.OUT_OF_BOUNDS)
# compute sub_grid corners
top_x, top_y = x - w // 2, y - h // 2
for j in range(0, h):
for i in range(0, w):
n = top_x + i
m = top_y + j
if 0 <= n < self.grid.shape[0] and 0 <= m < self.grid.shape[1]:
sub_grid[i, j] = self.grid[n, m]
return sub_grid
else:
canvas = self.grid.copy()
# canvas[agent.pos] = self.GridLegend.AGENT # TODO: add other agents in the local obs of single agents
# # only mark the goal of the agent (not the ones of the others)
# canvas[agent.goal] = self.GridLegend.GOAL
# # Convert to len(dict)-dimensional tensor for Conv_SQN. Can turn on or off
# if tensor == 1:
# canvas = self.grid2tensor(canvas)
# return canvas
key_grids = []
for key in self.GridLegend:
idx = np.where(canvas == key)
                key_grid = np.zeros(canvas.shape)
import config
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import os
import random
import torch
from nms import nms
from collections import Counter
from torch.utils.data import DataLoader
from tqdm import tqdm
def plot_particles(image, boxes, scores=True, pixels=False):
"""Plots predicted particles on the image"""
    im = np.array(image)
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.unittest.graph import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# ScaleShift layer
'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul and Add operations
'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
# Reshape
'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
# BatchNorm operation
'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
# Concat1 operation
'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
'op_output': {'kind': 'op', 'op': 'Result'}
}
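# Illustrative note (not from the original file): build_graph wires these node
# attribute templates together using the edge lists given in each test below,
# e.g. ('placeholder_1', 'placeholder_1_data') connects an op node to the data
# node that carries its output.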
class ScaleShiftToMulAdd(unittest.TestCase):
# ScaleShift -> Mul
def test_scaleshift_to_mul_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul
def test_scaleshift2_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([1, 227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift 2 inputs-> Mul (axis = 1)
def test_scaleshift2_axis1_to_mul(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_1_data', 'scaleshift_1'),
('placeholder_2_data', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'scaleshift_1': {'axis': 1},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_2', 'placeholder_2_data'),
('placeholder_2_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
('placeholder_1_data', 'mul_1'),
('placeholder_2/Reshape_data', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'placeholder_2_data': {'shape': np.array([227])},
'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul (Zero biases)
def test_scaleshift_to_mul_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> Mul->Add
def test_scaleshift_to_mul_add(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'scaleshift_1_data': {}
})
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'mul_1'),
('const_mul_1_w', 'mul_1_w'),
('mul_1_w', 'mul_1'),
('mul_1', 'mul_1_data'),
('mul_1_data', 'add_1'),
('const_add_1_w', 'add_1_w'),
('add_1_w', 'add_1'),
('add_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
'add_1': {'can_be_fused': True},
'mul_1': {'can_be_fused': True},
'scaleshift_1_data': {}
})
graph.graph['layout'] = 'NHWC'
convert_scale_shift_to_mul_add(graph)
graph.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
self.assertTrue(flag, resp)
# ScaleShift -> None (Zero weights and biases)
def test_scaleshift_to_nothing(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'scaleshift_1'),
('const_scaleshift_1_w', 'scaleshift_1_w'),
('const_scaleshift_1_b', 'scaleshift_1_b'),
('scaleshift_1_w', 'scaleshift_1'),
('scaleshift_1_b', 'scaleshift_1'),
('scaleshift_1', 'scaleshift_1_data'),
('scaleshift_1_data', 'op_output')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
#!usr/bin/env ipython
# Functions related to loading, saving, processing datasets
import tensorflow.keras.datasets as datasets
from tensorflow.keras import Model
import numpy as np
import pandas as pd
import os
from pathlib import Path
from scipy.stats import entropy
from scipy.spatial.distance import cosine
from sklearn.random_projection import GaussianRandomProjection
from sklearn.decomposition import PCA
from cfg_utils import load_cfg
from model_utils import build_model
# CONSTANTS
FOREST_PATH = os.path.join('data', 'covtype.data')
ADULT_PATH = os.path.join('data', 'adult.data')
ADULT_TEST_PATH = os.path.join('data', 'adult.test')
CIFAR10_PRETRAIN_PATH = os.path.join('data', 'cifar10_pretrain.npy')
def min_max_rescale(df_train, df_test, good_columns=None):
if good_columns is None:
col_mins = df_train.min(axis=0)
col_maxs = df_train.max(axis=0)
col_ranges = col_maxs - col_mins
good_columns = (col_ranges > 0)
print('Deleting', df_train.shape[1] - sum(good_columns), 'columns for not exhibiting variability')
df_train = df_train[:, good_columns]
df_test = df_test[:, good_columns]
print('Rescaling to [0, 1]...')
col_mins = df_train.min(axis=0)
col_maxs = df_train.max(axis=0)
col_ranges = np.float32(col_maxs - col_mins)
# if there's no variability, basically just mapping it to 0.5
col_ranges[col_ranges == 0] = 2*col_maxs[col_ranges == 0] + 1e-5
df_train = (df_train - col_mins)/col_ranges
df_test = (df_test - col_mins)/col_ranges
assert np.isnan(df_train).sum() == 0
assert np.isnan(df_test).sum() == 0
return df_train, df_test
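# Hedged sketch (added for illustration, not part of the original module):
# min-max rescaling on a toy array. Both outputs are scaled by the *training*
# statistics, so training values land exactly in [0, 1].
def _demo_min_max_rescale():
    tr = np.array([[0.0, 10.0], [5.0, 20.0]])
    te = np.array([[2.5, 15.0]])
    tr_scaled, te_scaled = min_max_rescale(tr, te)
    assert tr_scaled.min() == 0.0 and tr_scaled.max() == 1.0
    assert np.allclose(te_scaled, 0.5)  # midpoint of both training ranges
    return tr_scaled, te_scaled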
def load_data(options, replace_index):
# these are shared options
data_type = options['name']
data_privacy = 'all'
print('WARNING: Data privacy is fixed to all right now')
if data_type == 'mnist':
flatten = options['flatten']
binary = options['binary']
if binary:
# only care about doing this for binary classification atm, could just make an option
enforce_max_norm = True
else:
enforce_max_norm = False
if 'preprocessing' in options:
if options['preprocessing'] == 'PCA':
project = True
pca = True
crop = False
elif options['preprocessing'] == 'GRP':
project = True
pca = False
crop = False
elif options['preprocessing'] == 'crop':
project = False
pca = False
crop = True
else:
project = False
pca = False
crop = False
x_train, y_train, x_test, y_test = load_mnist(binary=binary,
enforce_max_norm=enforce_max_norm,
flatten=flatten,
data_privacy=data_privacy,
project=project,
crop=crop,
pca=pca)
elif data_type == 'cifar10':
flatten = options['flatten']
binary = options['binary']
subset = options['subset']
if binary:
enforce_max_norm = True
else:
enforce_max_norm = False
if flatten:
project = True
pca = True
else:
project = False
pca = False
x_train, y_train, x_test, y_test = load_cifar10(binary=binary,
enforce_max_norm=enforce_max_norm,
flatten=flatten,
data_privacy=data_privacy,
project=project,
pca=pca,
subset=subset)
elif data_type == 'cifar10_pretrain':
binary = options['binary']
if binary:
enforce_max_norm = True
else:
enforce_max_norm = False
x_train, y_train, x_test, y_test = load_cifar10_pretrain(binary=binary,
enforce_max_norm=enforce_max_norm)
elif data_type == 'cifar100':
# No options here
x_train, y_train, x_test, y_test = load_cifar100()
elif data_type == 'forest':
x_train, y_train, x_test, y_test = load_forest(data_privacy=data_privacy)
elif data_type == 'adult':
pca = False
if 'preprocessing' in options and options['preprocessing'] == 'PCA':
print('WARNING: When are we doing PCA with adult?')
pca = True
x_train, y_train, x_test, y_test = load_adult(data_privacy=data_privacy, pca=pca)
else:
raise ValueError(data_type)
x_train, y_train, x_vali, y_vali, x_test, y_test = validation_split(x_train, y_train, x_test, y_test, replace_index)
# Convert everything to float32
x_train = np.float32(x_train)
y_train = np.float32(y_train)
x_vali = np.float32(x_vali)
y_vali = np.float32(y_vali)
x_test = np.float32(x_test)
y_test = np.float32(y_test)
return x_train, y_train, x_vali, y_vali, x_test, y_test
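# Hedged usage sketch (illustrative option values; MNIST is downloaded on
# first use): load_data dispatches on options['name'] and returns six float32
# arrays (train / validation / test splits).
def _demo_load_data_options():
    options = {'name': 'mnist', 'flatten': True, 'binary': True}
    return load_data(options, replace_index=None)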
def validation_split(x_train, y_train, x_test, y_test, replace_index):
# we need to generate a validation set (do it from the train set)
N = x_train.shape[0]
n_vali = int(0.1*N)
vali_idx = range(n_vali)
train_idx = [i for i in range(N) if i not in vali_idx]
assert len(set(vali_idx).intersection(set(train_idx))) == 0
x_vali = x_train[vali_idx]
y_vali = y_train[vali_idx]
x_train = x_train[train_idx]
y_train = y_train[train_idx]
if replace_index:
replace_index = int(replace_index)
# we always replace with ELEMENT 0 (wlog, ish), then don't use the first row
# (this is to avoid an effect where experiments where the replace_index is low encounter an unusually
# low-variance batch at the start of training!)
special_idx = 0
x_special = x_train[special_idx]
y_special = y_train[special_idx]
x_train[replace_index] = x_special
y_train[replace_index] = y_special
x_train = np.delete(x_train, special_idx, axis=0)
y_train = np.delete(y_train, special_idx, axis=0)
return x_train, y_train, x_vali, y_vali, x_test, y_test
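# Hedged sketch (illustrative, not part of the original module): the first 10%
# of the training rows become validation; with replace_index set, that training
# row is overwritten by a copy of row 0 and row 0 is then dropped.
def _demo_validation_split():
    x = np.arange(20, dtype=np.float32).reshape(20, 1)
    y = np.zeros(20, dtype=np.float32)
    x_tr, y_tr, x_va, y_va, _, _ = validation_split(x, y, x[:2], y[:2], replace_index=None)
    assert len(x_va) == 2 and len(x_tr) == 18
    return x_tr, x_va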
def load_forest(data_privacy='all'):
path = os.path.join('data', 'forest_' + data_privacy + '.npy')
try:
data = np.load(path, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
except FileNotFoundError:
print('Loading...')
all_data = pd.read_csv(FOREST_PATH, header=None)
# select just types 1 and 2 (these are the most common)
print('Selecting classes 1 and 2')
binary_data = all_data.loc[all_data.iloc[:, -1].isin({1, 2}), :]
# split into features and labels
y = binary_data.iloc[:, -1].values
# rescale to 0 and 1!
y = y - 1
assert set(y) == set([0, 1])
features = binary_data.iloc[:, :-1].values
assert features.shape[1] == 54
N = features.shape[0]
print('Resulting number of examples:', N)
# test-train split
print('Doing test-train split')
train_frac = 0.85
n_train = int(N*train_frac)
train_idx = np.random.choice(N, n_train, replace=False)
        train_idx_set = set(train_idx)
        test_idx = [x for x in range(N) if x not in train_idx_set]
print('n train:', n_train, 'n test:', len(test_idx))
x_train = features[train_idx, :]
x_test = features[test_idx, :]
y_train = y[train_idx]
y_test = y[test_idx]
# need to keep this to make sure the columns are all the same... when we do public/private split
x_train_orig = x_train.copy()
# do public/private split
x_train, y_train, x_test, y_test = public_private_split('forest', data_privacy,
x_train, y_train,
x_test, y_test)
# now we need to normalise this
# rescale to 0-1 first
col_mins = x_train_orig.min(axis=0)
col_maxs = x_train_orig.max(axis=0)
col_ranges = col_maxs - col_mins
good_columns = (col_ranges > 0)
del x_train_orig
x_train, x_test = min_max_rescale(x_train, x_test, good_columns=good_columns)
# and NOW we project to the unit sphere
print('Projecting to sphere...')
x_train = x_train / np.linalg.norm(x_train, axis=1).reshape(-1, 1)
x_test = x_test / np.linalg.norm(x_test, axis=1).reshape(-1, 1)
assert np.all(np.abs(np.linalg.norm(x_train, axis=1) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=1) - 1) < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
print('Saving...')
np.save(path, data)
return x_train, y_train, x_test, y_test
def public_private_split(dataset, data_privacy, x_train, y_train, x_test, y_test):
"""
"""
if data_privacy == 'all':
print('Including all data')
else:
print('Splitting data into public/private!')
split_path = os.path.join('data', dataset + '_public_private_split.npy')
try:
split = np.load(split_path, allow_pickle=True).item()
print('Loaded pre-computed split from', split_path)
public_train_idx = split['public_train_idx']
public_test_idx = split['public_test_idx']
private_train_idx = split['private_train_idx']
private_test_idx = split['private_test_idx']
except FileNotFoundError:
print('No pre-defined split found!')
N_train = x_train.shape[0]
N_test = x_test.shape[0]
public_train_idx = np.random.choice(N_train, int(0.5*N_train), replace=False)
public_test_idx = np.random.choice(N_test, int(0.5*N_test), replace=False)
private_train_idx = np.array([i for i in range(N_train) if i not in public_train_idx])
private_test_idx = np.array([i for i in range(N_test) if i not in public_test_idx])
assert len(set(public_train_idx).intersection(set(private_train_idx))) == 0
assert len(set(public_test_idx).intersection(set(private_test_idx))) == 0
split = {'public_train_idx': public_train_idx,
'public_test_idx': public_test_idx,
'private_train_idx': private_train_idx,
'private_test_idx': private_test_idx}
np.save(split_path, split)
print('Saved split to', split_path)
if data_privacy == 'public':
x_train = x_train[public_train_idx]
y_train = y_train[public_train_idx]
x_test = x_test[public_test_idx]
y_test = y_test[public_test_idx]
elif data_privacy == 'private':
x_train = x_train[private_train_idx]
y_train = y_train[private_train_idx]
x_test = x_test[private_test_idx]
y_test = y_test[private_test_idx]
return x_train, y_train, x_test, y_test
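# Hedged usage sketch (illustrative): with data_privacy='all' the helper is a
# pass-through and returns its inputs unchanged, without writing a split file.
def _demo_public_private_split_noop():
    x_tr, y_tr = np.zeros((4, 2)), np.zeros(4)
    x_te, y_te = np.zeros((2, 2)), np.zeros(2)
    out = public_private_split('toy', 'all', x_tr, y_tr, x_te, y_te)
    assert out[0] is x_tr and out[2] is x_te
    return out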
def load_mnist(binary=False, enforce_max_norm=False, flatten=True,
data_privacy='all', project=True, pca=False, crop=False):
dataset_identifier = 'mnist' + '_' + data_privacy + '_binary'*binary + '_maxnorm'*enforce_max_norm + '_square'*(not flatten) + '_pca'*pca + '_crop'*crop + '.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
# cant load from file, build it up again
mnist = datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, y_train, x_test, y_test = public_private_split('mnist', data_privacy, x_train, y_train, x_test, y_test)
if binary:
# keep only 3 and 5 (I chose these randomly)
keep_train = (y_train == 3) | (y_train == 5)
keep_test = (y_test == 3) | (y_test == 5)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (5 is 1, 3 is 0)
y_train[y_train == 5] = 1
y_train[y_train == 3] = 0
y_test[y_test == 5] = 1
y_test[y_test == 3] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
if crop:
assert x_train.shape[1:] == (28, 28)
assert x_test.shape[1:] == (28, 28)
x_train = x_train[:, 9:19, 9:19]
x_test = x_test[:, 9:19, 9:19]
side_length = 10
else:
side_length = 28
if flatten:
x_train = x_train.reshape(-1, side_length*side_length)
x_test = x_test.reshape(-1, side_length*side_length)
if project:
# you can only project flattened data
# by default we do gaussian random projections
if pca:
# do PCA down to 50
# in the Abadi paper they do 60 dimensions, but to help comparison with Wu I'd rather do 50 here
transformer = PCA(n_components=50)
else:
# do random projection on MNIST
# in the Wu paper they project to 50 dimensions
transformer = GaussianRandomProjection(n_components=50)
# fit to train data
transformer.fit(x_train)
# transform everything
x_train = transformer.transform(x_train)
x_test = transformer.transform(x_test)
assert x_train.shape[1] == 50
assert x_test.shape[1] == 50
else:
# keeping it not-flat
# just add a sneaky little dimension on there for the CNN
x_train = x_train.reshape(-1, side_length, side_length, 1)
x_test = x_test.reshape(-1, side_length, side_length, 1)
if enforce_max_norm:
# slightly different normalisation to what's normal in MNIST
if len(x_train.shape) == 2:
axis = (1)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1)
elif len(x_train.shape) == 4:
axis = (1, 2)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1, 1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1, 1, 1)
else:
raise ValueError(x_train.shape)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
assert np.all(np.abs(np.linalg.norm(x_train, axis=axis) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=axis) - 1) < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
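# Hedged sketch (illustrative) of the max-norm clipping used above: rows with
# L2 norm above 1 are projected onto the unit sphere; rows already inside the
# unit ball are left untouched.
def _demo_max_norm_clip():
    x = np.array([[3.0, 4.0], [0.3, 0.4]])
    norms = np.linalg.norm(x, axis=1).reshape(-1, 1)
    x_clipped = np.where(norms > 1, x / norms, x)
    assert np.allclose(np.linalg.norm(x_clipped, axis=1), [1.0, 0.5])
    return x_clipped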
def load_cifar10(binary=False, enforce_max_norm=False, flatten=True,
data_privacy='all', project=True, pca=False, crop=False,
subset: bool = True):
"""
copying what i did for mnist, but for cifar10
cropping is also a 10x10 square in the middle
"""
dataset_identifier = 'cifar10' + '_' + data_privacy + '_binary'*binary + '_maxnorm'*enforce_max_norm + '_square'*(not flatten) + '_pca'*pca + '_crop'*crop + '_subset'*subset + '.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
cifar10 = datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = y_train[:, 0]
y_test = y_test[:, 0]
x_train, y_train, x_test, y_test = public_private_split('cifar10', data_privacy,
x_train, y_train,
x_test, y_test)
if binary:
# keep only 3 and 5
# coincidentally, although i chose 3 and 5 randomly for MNIST,
# in CIFAR10 these correspond to cats and dogs, which is a convenient pair
keep_train = (y_train == 0) | (y_train == 2)
keep_test = (y_test == 0) | (y_test == 2)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (2 is 1, 0 is 0)
y_train[y_train == 2] = 1
y_train[y_train == 0] = 0
y_test[y_test == 2] = 1
y_test[y_test == 0] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
if crop:
assert x_train.shape[1:] == (32, 32, 3)
assert x_test.shape[1:] == (32, 32, 3)
x_train = x_train[:, 11:21, 11:21, :]
x_test = x_test[:, 11:21, 11:21, :]
side_length = 10
else:
side_length = 32
if flatten:
# greyscale conversion from RGB
# Y = 0.2989 R + 0.5870 G + 0.1140 B
# greyscale_weights = [0.2989, 0.5870, 0.1140]
# x_train = 1 - np.dot(x_train, greyscale_weights)
# x_test = 1 - np.dot(x_test, greyscale_weights)
x_train = x_train.reshape(-1, side_length*side_length*3)
x_test = x_test.reshape(-1, side_length*side_length*3)
if project:
# you can only project flattened data
n_dim = 50
# by default we do gaussian random projections
if pca:
# do PCA down to 50
# in the Abadi paper they do 60 dimensions, but to help comparison with Wu I'd rather do 50 here
transformer = PCA(n_components=n_dim)
else:
# do random projection on MNIST
# in the Wu paper they project to 50 dimensions
transformer = GaussianRandomProjection(n_components=n_dim)
# fit to train data
transformer.fit(x_train)
# transform everything
x_train = transformer.transform(x_train)
x_test = transformer.transform(x_test)
assert x_train.shape[1] == n_dim
assert x_test.shape[1] == n_dim
else:
# keeping it not-flat
assert len(x_train.shape) == 4
assert len(x_test.shape) == 4
if enforce_max_norm:
if len(x_train.shape) == 2:
axis = (1)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1)
elif len(x_train.shape) == 4:
axis = (1, 2)
train_norms = np.linalg.norm(x_train, axis=axis).reshape(-1, 1, 1, 1)
test_norms = np.linalg.norm(x_test, axis=axis).reshape(-1, 1, 1, 1)
else:
raise ValueError(x_train.shape)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
assert np.all(np.abs(np.linalg.norm(x_train, axis=axis) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=axis) - 1) < 1e-6)
if subset:
# Copying Yeom, take a random 15,000 samples from the dataset
# and make the train and test splits the same size
# take the train from the train
            assert x_train.shape[0] >= 15000
            n_train_full = x_train.shape[0]
            train_idx_subset = np.random.choice(n_train_full, 15000, replace=False)
            train_idx_set = set(train_idx_subset)
            remaining_available = [x for x in range(n_train_full) if x not in train_idx_set]
            assert x_test.shape[0] < 15000
            remaining_required = 15000 - x_test.shape[0]
            test_idx_additional = np.random.choice(remaining_available, remaining_required, replace=False)
            for x in test_idx_additional:
                assert x not in train_idx_set
            # take the extra test examples from the *original* train arrays
            # before they are subsetted, so the indices still refer to the same rows
            x_test_additional = x_train[test_idx_additional]
            y_test_additional = y_train[test_idx_additional]
            x_train = x_train[train_idx_subset]
            y_train = y_train[train_idx_subset]
            x_test = np.concatenate([x_test, x_test_additional])
            y_test = np.concatenate([y_test, y_test_additional])
assert x_train.shape[0] == 15000
assert y_train.shape[0] == 15000
assert x_test.shape[0] == 15000
assert y_test.shape[0] == 15000
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
def load_cifar10_pretrain(binary=False, enforce_max_norm=False):
"""
"""
dataset_identifier = f'cifar10_pretrain{binary*"_binary"}{enforce_max_norm*"_maxnorm"}.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
print(f'Attempting to load data from {CIFAR10_PRETRAIN_PATH}')
try:
cifar10_pretrain = np.load(CIFAR10_PRETRAIN_PATH, allow_pickle=True).item()
x_train = cifar10_pretrain['x_train']
x_test = cifar10_pretrain['x_test']
y_train = cifar10_pretrain['y_train']
y_test = cifar10_pretrain['y_test']
print(f'Loaded pre-processed data from {CIFAR10_PRETRAIN_PATH}')
except FileNotFoundError:
print(f'ERROR: Couldn\'t find {CIFAR10_PRETRAIN_PATH}!')
print('... are you sure you have already preprocessed CIFAR10 using the CIFAR100 model?')
raise FileNotFoundError
if binary:
# Copied from load_cifar10
# keep only 3 and 5
# coincidentally, although i chose 3 and 5 randomly for MNIST,
# in CIFAR10 these correspond to cats and dogs, which is a convenient pair
keep_train = (y_train == 0) | (y_train == 2)
keep_test = (y_test == 0) | (y_test == 2)
x_train = x_train[keep_train]
x_test = x_test[keep_test]
y_train = y_train[keep_train]
y_test = y_test[keep_test]
# convert to binary (2 is 1, 0 is 0)
y_train[y_train == 2] = 1
y_train[y_train == 0] = 0
y_test[y_test == 2] = 1
y_test[y_test == 0] = 0
# sanity check
assert set(y_train) == {1, 0}
assert set(y_test) == {1, 0}
if enforce_max_norm:
assert len(x_train.shape) == 2
train_norms = np.linalg.norm(x_train, axis=1).reshape(-1, 1)
test_norms = np.linalg.norm(x_test, axis=1).reshape(-1, 1)
x_train = np.where(train_norms > 1, x_train/train_norms, x_train)
x_test = np.where(test_norms > 1, x_test/test_norms, x_test)
            # Don't need an abs because the norm just needs to be BELOW 1, not equal to 1
assert np.all(np.linalg.norm(x_train, axis=1) - 1 < 1e-6)
assert np.all(np.linalg.norm(x_test, axis=1) - 1 < 1e-6)
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
def load_cifar100():
"""
We only use CIFAR100 for pretraining a CNN for CIFAR10, so we don't need to
be able to flatten, etc.
"""
dataset_identifier = 'cifar100.npy'
dataset_string = os.path.join('data', dataset_identifier)
try:
data = np.load(dataset_string, allow_pickle=True).item()
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Loaded data from', dataset_string)
except FileNotFoundError:
print('Couldn\'t load data from', dataset_string)
cifar100 = datasets.cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
y_train = y_train[:, 0]
y_test = y_test[:, 0]
# typical normalisation
x_train, x_test = x_train/255.0, x_test/255.0
# keeping it not-flat
assert len(x_train.shape) == 4
assert len(x_test.shape) == 4
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
np.save(dataset_string, data)
print('Saved data to', dataset_string)
return x_train, y_train, x_test, y_test
def load_adult(data_privacy='all', pca=False):
"""
"""
path = os.path.join('data', 'adult' + '_' + data_privacy + '_pca'*pca + '.npy')
try:
data = np.load(path, allow_pickle=True).item()
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
print('Loaded from file')
except FileNotFoundError:
adult_header = ['age',
'workclass',
'fnlwgt',
'education',
'education-num',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'capital-gain',
'capital-loss',
'hours-per-week',
'native-country',
'label']
df = pd.read_csv(ADULT_PATH, sep=', ', header=None)
df_test = pd.read_csv(ADULT_TEST_PATH, sep=', ', skiprows=1, header=None)
df.columns = adult_header
df_test.columns = adult_header
label_replace_dict = {'>50K': 1, '<=50K': 0,
'>50K.': 1, '<=50K.': 0}
y_train = df['label'].replace(label_replace_dict).values
y_test = df_test['label'].replace(label_replace_dict).values
assert set(y_train) == set([0, 1])
assert set(y_test) == set([0, 1])
x_train = df.iloc[:, :-1]
x_test = df_test.iloc[:, :-1]
# need to one-hot encode
# pd.dummies does this, it is also smart about identifying categorical columns
x_train = pd.get_dummies(x_train, drop_first=True)
x_test = pd.get_dummies(x_test, drop_first=True)
# need to make sure they have exactly the same columns
missing_in_test = set(x_train.columns).difference(set(x_test.columns))
print('Inserting columns into test:', missing_in_test)
for col in missing_in_test:
x_test[col] = 0
missing_in_train = set(x_test.columns).difference(set(x_train.columns))
print('Inserting columns into train:', missing_in_train)
for col in missing_in_train:
x_train[col] = 0
assert set(x_test.columns) == set(x_train.columns)
# now put them in the same order
x_test = x_test[x_train.columns]
assert np.all(x_train.columns == x_train.columns)
# now convert to features
x_train = x_train.values
x_test = x_test.values
x_train_orig = x_train.copy()
# do public/private split
x_train, y_train, x_test, y_test = public_private_split('adult', data_privacy,
x_train, y_train,
x_test, y_test)
# now we need to normalise this
# rescale to 0-1 first
col_mins = x_train_orig.min(axis=0)
col_maxs = x_train_orig.max(axis=0)
col_ranges = col_maxs - col_mins
good_columns = (col_ranges > 0)
del x_train_orig
# now normalise
x_train, x_test = min_max_rescale(x_train, x_test, good_columns=good_columns)
# pca, if pca
if pca:
print('doing PCA!')
transformer = PCA(n_components=50)
transformer.fit(x_train)
# transform everything
x_train = transformer.transform(x_train)
x_test = transformer.transform(x_test)
# now project to sphere
print('Projecting to sphere...')
x_train = x_train / np.linalg.norm(x_train, axis=1).reshape(-1, 1)
x_test = x_test / np.linalg.norm(x_test, axis=1).reshape(-1, 1)
assert np.all(np.abs(np.linalg.norm(x_train, axis=1) - 1) < 1e-6)
assert np.all(np.abs(np.linalg.norm(x_test, axis=1) - 1) < 1e-6)
# double-check sizes
assert x_train.shape[0] == y_train.shape[0]
assert x_test.shape[0] == y_test.shape[0]
assert x_train.shape[1] == x_test.shape[1]
# now save
data = {'x_train': x_train,
'x_test': x_test,
'y_train': y_train,
'y_test': y_test}
print('Saving...')
np.save(path, data)
return x_train, y_train, x_test, y_test
def solve_with_linear_regression(dataset, replace_index=None):
"""
    Assuming linear regression (MSE loss, linear model) on the dataset, compute the optimum value and the Hessian at that point (computed here on the training split).
"""
x, y, _, _, _, _ = load_data(dataset, replace_index=replace_index)
# for linear regression, the hessian is constant (although dependent on the data ofc)
N, d = x.shape
# have to add a column onto x to account for the bias in the linear model
bias = np.ones((N, 1))
x = np.hstack([x, bias])
hessian = (2.0/N)*np.dot(x.T, x)
assert hessian.shape[0] == hessian.shape[1]
assert hessian.shape[0] == d + 1
# optimum = np.dot(np.linalg.inv(hessian), np.dot(x.T, y))
optimum = np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y))
# report the loss
mse = np.mean((np.dot(x, optimum) - y)**2)
print(mse)
return optimum, hessian
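# Hedged sketch (illustrative, synthetic data): the closed-form least-squares
# solution used above recovers the true weights exactly in the noiseless case.
def _demo_linear_regression_closed_form(n=100, d=3, seed=0):
    rng = np.random.default_rng(seed)
    x = np.hstack([rng.normal(size=(n, d)), np.ones((n, 1))])  # bias column, as above
    w_true = rng.normal(size=d + 1)
    y = x @ w_true
    w_hat = np.linalg.solve(x.T @ x, x.T @ y)
    assert np.allclose(w_hat, w_true)
    return w_hat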
def compute_JS_distance(samples_A, samples_B, bins='auto'):
"""
Assuming samples_A and samples_B are samples from distributions A and B,
compute the (approximate) JS distance between them by:
- converting each set of samples to a histogram defined over the same discretised space (with granularity given by bins)
- computing the relative entropy both ways
WARNING: not sure how the sensitivity to granularity may impact results here
"""
# convert both to empirical PMFs
hist_A, bin_edges = np.histogram(samples_A, density=True, bins=bins)
hist_B, bin_edges_B = np.histogram(samples_B, bins=bin_edges, density=True)
assert np.array_equal(bin_edges, bin_edges_B)
# get the middle distribution
hist_M = 0.5*(hist_A + hist_B)
# compute the KL divergence both ways
KL_AM = entropy(hist_A, hist_M)
KL_BM = entropy(hist_B, hist_M)
# now get the JS
    JS = 0.5*(KL_AM + KL_BM)  # strictly the JS divergence; the JS distance is its square root
return JS
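# Hedged sanity check (illustrative): samples from identical Gaussians should
# score a smaller JS value than samples from well-separated ones.
def _demo_js_distance(seed=0):
    rng = np.random.default_rng(seed)
    js_same = compute_JS_distance(rng.normal(size=10000), rng.normal(size=10000))
    js_diff = compute_JS_distance(rng.normal(size=10000), rng.normal(5.0, 1.0, size=10000))
    assert js_same < js_diff
    return js_same, js_diff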
def compute_cosine_distances_for_dataset(data_type):
"""
    Compute pairwise cosine distances between the training samples of a dataset.
    """
path = os.path.join('data', data_type + '.cosine_distances.npy')
try:
data = np.load(path, allow_pickle=True).item()
pairs = data['pairs']
distances = data['distances']
print('Loaded from file')
except FileNotFoundError:
x, y, _, _, _, _ = load_data(data_type, replace_index='NA')
N = x.shape[0]
n_distances = int(N*(N-1)/2)
        distances = np.zeros(n_distances)
# Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with # the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwallerlab imports
from llops import display
from llops import Roi
class StopAndStareAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_type='bf',
illumination_sequence=None,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
trigger_mode='software',
motion_acceleration_mm_s_2=1e3,
flip_pathway=False,
acquisition_timeout_s=3,
illumination_na_pad=0.03,
illumination_color={'w': 127},
settle_time_s=0):
# Parse options
self.illumination_type = illumination_type
self.settle_time_s = settle_time_s
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.velocity_mm_s = velocity_mm_s
self.max_exposure_time_s = max_exposure_time_s
self.illumination_na_pad = illumination_na_pad
self.illumination_color = illumination_color
self.acquisition_timeout_s = acquisition_timeout_s
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add software triggered elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Check to be sure a sequence acquisition is not running
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
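        # Example (illustrative numbers): a 6.5 um camera pixel behind a 20x
        # objective with 1x system magnification gives an effective object-plane
        # pixel of 6.5 / (20 * 1) = 0.325 um.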
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
self.metadata.type = 'stop and stare'
assert 'illumination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device'
# Generate motion pathway
self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster(
self.object_size_mm, self.frame_spacing_mm)
# Generate illumination sequence
        illumination_pattern_sequence = [self.illumination_type] * \
            len(self.hardware_controller_list['position'].state_sequence)
        self.hardware_controller_list['illumination'].state_sequence = self.genMultiContrastSequence(
            illumination_pattern_sequence)
# Tell device not to use feedback
self.hardware_controller_list['illumination'].trigger_wait_flag = False
self.hardware_controller_list['illumination'].command('trs.0.500.0')
self.hardware_controller_list['illumination'].command('trs.1.500.0')
self.hardware_controller_list['position'].goToPosition((0,0))
self.hardware_controller_list['position'].command('ENCODER X 1')
self.hardware_controller_list['position'].command('ENCODER Y 1')
self.hardware_controller_list['position'].command('ENCW X 100')
self.hardware_controller_list['position'].command('ENCW Y 100')
def acquire(self, exposure_time_ms=50):
# Allocate memory for frames
if self.hardware_controller_list['camera'].isSequenceRunning():
self.hardware_controller_list['camera'].sequenceStop()
self.hardware_controller_list['camera'].setBufferSizeMb(
20 * len(self.hardware_controller_list['position'].state_sequence))
# Set camera exposure
self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3)
self.hardware_controller_list['camera'].setTriggerMode('hardware')
self.hardware_controller_list['camera'].runSequence()
self.hardware_controller_list['illumination'].bf()
# Snap one image to ensure all acquisitions are started
self.hardware_controller_list['camera'].snap()
# generate frame_list
t0 = time.time()
frames_acquired = 0
frame_list = []
for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'):
pos = frame['states']
x = pos[0][0]['value']['x']
y = pos[0][0]['value']['y']
self.hardware_controller_list['position'].goToPosition((x, y), blocking=True)
time.sleep(self.settle_time_s)
frame_list.append(self.hardware_controller_list['camera'].snap())
frames_acquired += 1
# print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence)))
t_acq_sns = time.time() - t0
print("Acquisition took %.4f seconds" % (t_acq_sns))
# Create dataset
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Assign acquisition time
self.metadata.acquisition_time_s = t_acq_sns
# Apply simple geometric transformations (convert the frame list to an array first,
# since transpose/flip operate on the image stack)
frame_list = np.asarray(frame_list)
if self.metadata.camera.transpose:
frame_list = frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
frame_list = np.flip(frame_list, 2)
if self.metadata.camera.flip_y:
frame_list = np.flip(frame_list, 1)
# Assign
dataset.frame_list = [frame for frame in frame_list]
# Set frame state list
self.n_frames = len(self.hardware_controller_list['position'].state_sequence)
frame_state_list = []
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = [0]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
dataset.metadata = self.metadata
dataset.type = 'stop_and_stare'
dataset.frame_state_list = frame_state_list
return dataset
def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_minor_axis=False):
# Determine major axis
if major_axis is None:
major_axis = np.argmax(np.asarray(object_size_mm))
if object_size_mm[0] == object_size_mm[1]:
major_axis = 1
# Determine number of measurements
measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)
).astype(int) # two components in x and y
# Determine slightly smaller frame spacing for optimal coverage of object
frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1])
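# Example (hypothetical values): object_size_mm = (1.0, 0.9) with frame_spacing_mm = (0.5, 0.5)
# gives measurement_count = (2, 2) and a refined frame spacing of (0.5, 0.45) mm.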
# Error checking
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
linear_segment_index += 1
# make the center the mean of the pathway
path_means = []
for path in pathway:
path_mean = ((path['y_start']), (path['x_start']))
path_means.append(path_mean)
mean = np.sum(np.asarray(path_means), axis=0) / len(path_means)
for path in pathway:
path['x_start'] -= mean[1]
path['x_end'] -= mean[1]
path['y_start'] -= mean[0]
path['y_end'] -= mean[0]
# return pathway
state_sequence = []
for path in pathway:
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure()
common_state_dict['led_update_rate_us'] = None
common_state_dict['linear_segment_index'] = None
common_state_dict['frame_distance'] = 0
common_state_dict['exposure_distance'] = 0
common_state_dict['velocity'] = self.velocity_mm_s
common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2
common_state_dict['n_blur_positions_exposure'] = 1
common_state_dict['position_delta_x_mm'] = 0
common_state_dict['position_delta_y_mm'] = 0
path_dict = {'value': {'time_index' : 0,
'x': path['x_start'],
'y': path['y_start']}}
state_sequence.append({'states' : [[path_dict]], 'common' : common_state_dict})
return state_sequence
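# Example (hypothetical call): genStopAndStarePathwayRaster((1.0, 1.0), (0.5, 0.5))
# returns a list of frame states describing a centered 2 x 2 grid of stop positions,
# visited in serpentine (boustrophedon) order.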
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
plt.title('Pathway (blue: start, red: end)')
plt.gca().invert_yaxis()
def genMultiContrastSequence(self, illumination_pattern_sequence, n_acquisitions=1,
darkfield_annulus_width_na=0.1):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
led_list_bf = led_list[bf_mask]
led_list_df = led_list[~bf_mask]
led_list_an = led_list[~bf_mask & (self.metadata.illumination.state_list.design[:, 0] ** 2
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)]
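# The masks above partition the LED array (a sketch of the intent): led_list_bf holds
# brightfield LEDs inside the objective NA (plus padding), led_list_df all darkfield
# LEDs outside it, and led_list_an the darkfield LEDs within a thin annulus just past the NA.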
illumination_sequence = []
self.pattern_type_list = []
pattern_dict = {'dpc.top': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] > 0]),
'dpc.bottom': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] < 0]),
'dpc.left': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] > 0]),
'dpc.right': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] < 0]),
'single': [0],
'bf': np.ndarray.tolist(led_list_bf),
'df': np.ndarray.tolist(led_list_df),
'an': np.ndarray.tolist(led_list_an),
'full': np.ndarray.tolist(led_list)
}
# DPC does not flicker patterns within frames
n_time_points_per_frame = 1
illumination_state_list = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, pattern in enumerate(illumination_pattern_sequence):
single_frame_state_list_illumination = []
# Loop over time points (irrelevant for DPC)
for time_index in range(n_time_points_per_frame):
time_point_state_list = []
# Loop over LEDs in this pattern
for led_idx in pattern_dict[pattern]:
values_dict = {}
for color_name in self.illumination_color:
values_dict[color_name] = self.illumination_color[color_name]
led_dict = {
'index': int(led_idx),
'time_index': 0,
'value': values_dict
}
# Append this to list with elements for each interframe time point
time_point_state_list.append(led_dict)
# Append to frame_dict
single_frame_state_list_illumination.append(time_point_state_list)
# Define illumination sequence
illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}})
# Define illumination list
self.state_list = self.metadata.illumination.state_list.design
return illumination_state_list
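# Example (hypothetical call): genMultiContrastSequence(['bf', 'dpc.top', 'dpc.left'])
# returns one illumination state entry per requested pattern, each listing the LED
# indices and per-color values to display during that frame.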
class MotionDeblurAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_sequence=None,
motion_path_type='linear',
use_l1_distance_for_motion_calculations=True,
blur_vector_method='pseudo_random',
kernel_pulse_count=150,
saturation_factor=1.0,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
max_velocity_mm_s=40.0,
max_led_update_rate_us=0.01,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
motion_acceleration_mm_s_2=1e3,
extra_run_up_time_s=0,
flip_pathway=False,
segment_delay_s=0,
initial_auto_exposure=False,
acquisition_timeout_s=3,
illumination_sequence_count=1,
illumination_na_pad=0.03,
illumination_color={'w': 127},
only_store_first_and_last_position=True):
# Parse options
self.motion_path_type = motion_path_type
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.use_l1_distance_for_motion_calculations = use_l1_distance_for_motion_calculations
self.velocity_mm_s = velocity_mm_s
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.max_led_update_rate_us = max_led_update_rate_us
self.max_exposure_time_s = max_exposure_time_s
self.max_velocity_mm_s = max_velocity_mm_s
self.illumination_na_pad = illumination_na_pad
self.saturation_factor = saturation_factor
self.reuse_illumination_sequence = reuse_illumination_sequence
self.blur_vector_method = blur_vector_method
self.kernel_pulse_count = kernel_pulse_count
self.illumination_color = illumination_color
self.extra_run_up_time_s = extra_run_up_time_s
self.initial_auto_exposure = initial_auto_exposure
self.acquisition_timeout_s = acquisition_timeout_s
self.segment_delay_s = segment_delay_s
self.only_store_first_and_last_position = only_store_first_and_last_position
self.illumination_sequence = illumination_sequence
self.illumination_sequence_count = illumination_sequence_count
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
if hasattr(controller, 'trigger_mode'):
if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add software triggered elements
for controller in hardware_controller_list:
if not hasattr(controller, 'trigger_mode') or controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Check that a camera controller is present
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
# Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
# Set metadata type
self.metadata.type = 'motiondeblur'
assert 'illumination' in self.hardware_controller_list, 'Motion deblur object requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Motion deblur object requires motion stage'
# Initialize state_sequence
self.state_sequence = []
# Generate position sequence
self.hardware_controller_list['position'].state_sequence, self.time_sequence_s = self.genMotionPathway(
pathway_type=self.motion_path_type, frame_spacing_mm=frame_spacing_mm)
# Generate illumination sequence
self.hardware_controller_list['illumination'].state_sequence = self.genMotionIlluminationSequenceRandom(illumination_sequence=illumination_sequence,
sequence_count=self.illumination_sequence_count)
# Set up subframe captures
self.subframe_capture_count = len(self.hardware_controller_list['illumination'].state_sequence[0]['states'])
self.force_preload_all_frames = True
self.hardware_controller_list['position'].continuous_states_between_frames = True
# Configure illumination to use fast sequence updating if specified in options
self.hardware_controller_list['illumination'].use_fast_sequence = True
# Set bit depth
self.illumination_sequence_bit_depth = 1
# Set extra options for position controller
self.hardware_controller_list['position'].extra_run_up_time_s = self.extra_run_up_time_s
# Calculate effective pixel size if it hasn't already been calculated
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / \
(self.metadata.objective.mag * self.metadata.system.mag)
def preAcquire(self):
''' This method sets up the camera for an acquisition '''
# Check that the lengths of the motion, illumination, pupil, and focal sequences are the same (or None)
frame_counts = []
for hardware_controller_name in list(self.hardware_controller_list):
# Get controller object from dictionary
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
# Reset Controller
hardware_controller.reset()
# Get number of frames in sequence. If there is no sequence, remove this element from hw_controller_list
if hardware_controller.type != 'camera':
if hardware_controller.state_sequence is not None:
frame_counts.append(len(hardware_controller.state_sequence))
else:
self.hardware_controller_list.pop(hardware_controller_name)
else:
# Remove this controller from the list
if hardware_controller_name != 'camera':
del self.hardware_controller_list[hardware_controller_name]
# Turn on hardware triggering for initialization
self.hardware_controller_list['camera'].setTriggerMode('hardware')
# Set illumination parameters
if 'illumination' in self.hardware_controller_list:
# self.hardware_controller_list['illumination'].setColor(self.illumination_color)
self.hardware_controller_list['illumination'].setSequenceBitDepth(
self.illumination_sequence_bit_depth)
# Ensure all hardware elements have the same number of frames
if len(frame_counts) > 0:
if len(set(frame_counts)) > 1:
raise ValueError('Sequence lengths are not the same (or None).')
else:
self.n_frames = frame_counts[0]
else:
raise ValueError('No sequence provided!')
# Initialize frame_list
self.frame_list = np.zeros((self.n_frames,
self.hardware_controller_list['camera'].getImageHeight(), self.hardware_controller_list['camera'].getImageWidth()), dtype=np.uint16)
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Generate frame_state_list
frame_state_list = []
if self.time_sequence_s is None:
self.time_sequence_s = []
for _ in range(self.n_frames):
self.time_sequence_s.append([0])
# Loop over frames
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = self.time_sequence_s[frame_index]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
self.frame_state_list = frame_state_list
# Perform auto-exposure if user desires
if self.initial_auto_exposure:
# Illuminate with first pattern
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].sequenceReset()
self.hardware_controller_list['illumination'].time_sequence_s = [[0]]
self.hardware_controller_list['illumination'].preloadSequence(0)
self.hardware_controller_list['illumination'].sequenceStep()
# Small delay to ensure illumination gets updated
time.sleep(0.1)
# Run Auto-Exposure
self.hardware_controller_list['camera'].autoExposure()
# Set camera memory footprint
if (self.hardware_controller_list['camera'].getBufferTotalCapacity() < self.frame_list.shape[0]):
self.frame_size_mb = int(
np.ceil(float(self.frame_list.shape[0] / 1e6) * float(self.frame_list.shape[1]) * float(self.frame_list.shape[2]) * 2))
print('Allocating %dmb for frames' % self.frame_size_mb)
self.hardware_controller_list['camera'].setBufferSizeMb(self.frame_size_mb)
assert self.hardware_controller_list['camera'].getBufferTotalCapacity(
) >= self.frame_list.shape[0], 'Buffer size too small!'
# Store initial time (acquisition start)
t0 = time.time()
# Tell camera to start waiting for frames
self.hardware_controller_list['camera'].runSequence()
# Keep track of how many images we have acquired
self.total_frame_count = 0
def acquire(self,
dataset=None,
reset_devices=False):
'''
This is a generic acquisition method, where LEDs are updated according to the sequence variable.
'''
# Call preAcquire, which initializes hardware and variables
self.preAcquire()
# Determine which frames can be preloaded before serial acquisition. If each frame
# contains only one state, we assume all frames can be preloaded; if the state of any
# hardware element changes within a frame, we assume frames cannot be preloaded.
frame_count = 0
linear_segment_list = []
for frame_state in self.hardware_controller_list['position'].state_sequence:
if frame_state['common']['linear_segment_index'] >= 0:
frame_count += 1
if frame_state['common']['linear_segment_index'] not in linear_segment_list:
linear_segment_list.append(frame_state['common']['linear_segment_index'])
print("Found %d segments and %d frames" % (len(linear_segment_list), frame_count))
t_start = time.time()
for linear_segment_index in linear_segment_list:
self.frames_to_acquire = []
# Determine which linear segments to run
for frame_index, frame_state in enumerate(self.hardware_controller_list['position'].state_sequence):
if frame_state['common']['linear_segment_index'] == linear_segment_index:
self.frames_to_acquire += [frame_index]
self.n_frames_to_acquire = len(self.frames_to_acquire)
x_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['x']
y_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['y']
x_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['x']
y_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['y']
print('Starting linear segment %d which has %d frames moving from (%.4f, %.4f)mm to (%.4f, %.4f)mm' %
(linear_segment_index, self.n_frames_to_acquire, x_start, y_start, x_end, y_end))
frame_has_multiple_states = []
for frame_index in self.frames_to_acquire:
number_of_states_in_current_frame = 0
for hardware_controller_name in self.hardware_controller_list:
if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
# Check if this frame can be preloaded (if it has more than one state, it can't be preloaded)
number_of_states_in_current_frame = max(number_of_states_in_current_frame, len(
self.hardware_controller_list[hardware_controller_name].state_sequence[frame_index]['states']))
# Check that the length of time_sequence_s matches the max number of state changes within this frame
if number_of_states_in_current_frame > 1:
frame_has_multiple_states.append(True)
assert self.time_sequence_s is not None, "time_sequence_s can not be None if any frame has multiple states!"
assert len(self.time_sequence_s[frame_index]) == number_of_states_in_current_frame, "time_sequence_s for frame %d is of wrong length!" % frame_index
else:
frame_has_multiple_states.append(False)
# Determine if the entire multi-frame sequence can be preloaded (this will be False if any frame contains more than one system state (e.g. LED pattern))
all_frames_will_be_preloaded = (not any(frame_has_multiple_states)) or self.force_preload_all_frames
# Determine optimal exposure time for all frames
if self.exposure_time_s is not None:
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
elif self.time_sequence_s is not None and max(self.time_sequence_s[0]) > 0:
frame_exposures = []
for frame_index in range(self.n_frames_to_acquire):
frame_exposures.append(max(self.time_sequence_s[frame_index]))
self.exposure_time_s = sum(frame_exposures) / (self.n_frames_to_acquire)
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
else:
self.exposure_time_s = self.hardware_controller_list['camera'].getExposure()
# Check that exposure time is correct
assert abs(self.exposure_time_s - self.hardware_controller_list['camera'].getExposure(
)) < 1e-3, "Desired exposure time %.2f is not equal to device exposure %.2f. This is probably a MM issue" % (self.exposure_time_s, self.hardware_controller_list['camera'].getExposure())
# print('Using exposure time %.2fs (%d ms)' % (self.exposure_time_s, int(self.exposure_time_s * 1000)))
# Check that time_sequence_s for multiple frames exists if there are inter-frame state changes
if (not any(frame_has_multiple_states)) or self.time_sequence_s is None:
self.time_sequence_s = [self.exposure_time_s]
# Configure hardware triggering
trigger_output_settings = [0, 0]
trigger_input_settings = [0, 0]
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hasattr(hardware_controller, 'trigger_mode') and 'hardware' in hardware_controller.trigger_mode:
# Check that trigger pins are configured
assert hardware_controller.trigger_pin is not None, 'Trigger pin must be configured for hardware triggering!'
# Determine if we're performing preloadable acquisitions or not
if self.subframe_capture_count > 1:
if self.reuse_illumination_sequence:
if hardware_controller_name == 'camera':
if self.illumination_sequence_count == 1:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
else:
trigger_output_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
trigger_input_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
else:
if hardware_controller_name == 'camera':
trigger_output_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
trigger_input_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
# Case where there is only one system state within each frame (trigger each frame)
elif all_frames_will_be_preloaded:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
# Case where we only want to trigger on first frame. This is probably not a good default.
else:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
# Check that this hardware controller is ready for a sequence, if it is sequencable.
if hardware_controller.state_sequence is not None:
# Reset controller sequence to initial state
hardware_controller.sequenceReset()
time.sleep(0.1)
# Wait until initialization is complete
initialization_wait_time = 0
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
initialization_wait_time += 0.05
if initialization_wait_time > self.acquisition_timeout_s:
raise ValueError('Pre-acquisition isReadyForSequence timeout for %s' % hardware_controller_name)
# Tell the hardware controller about the acquisition time sequence
if len(hardware_controller.state_sequence) == len(self.time_sequence_s):
hardware_controller.time_sequence_s = [self.time_sequence_s[i] for i in self.frames_to_acquire]
else:
hardware_controller.time_sequence_s = [
[self.hardware_controller_list['camera'].getExposure()]] * self.n_frames_to_acquire
# Set up triggering for hardware acquisition
self.hardware_controller_list['illumination'].trigger_output_settings = trigger_output_settings
self.hardware_controller_list['illumination'].trigger_input_settings = trigger_input_settings
# Determine which sequences get preloaded
if all_frames_will_be_preloaded: # One system state per acquisition
frame_preload_sequence = [-1] # Preload all frames at once
else:
frame_preload_sequence = range(self.n_frames_to_acquire) # Preload each frame serially
# Loop over frames to capture (may only execute once if we're preloading all frames)
for preload_index in frame_preload_sequence:
# Loop over hardware controllers, preload, and determine necessary exposure time (if using inter-frame state changes)
for hardware_controller_name in self.hardware_controller_list:
# If we're using the motion stage, calculate the mechanical delay
if hardware_controller_name == 'position':
# Get velocity and acceleration from state sequence
if preload_index == -1:
index = 0
else:
index = preload_index
velocity = self.hardware_controller_list[hardware_controller_name].state_sequence[0]['common']['velocity']
acceleration = self.hardware_controller_list[hardware_controller_name].acceleration
jerk = self.hardware_controller_list[hardware_controller_name].jerk
# Calculate spin-up time and distance
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+0.5*j+*+t)+solve+for+t
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+(1%2F8)*j+*+t)+solve+for+t
# Good reference:
# http://www.et.byu.edu/~ered/ME537/Notes/Ch5.pdf
# Total period
if False:
# First period (acceleration of acceleration)
t_1 = acceleration / jerk
# x_1 = 1/6 * jerk * t_1 ** 3
x_1 = acceleration ** 2 / (6 * jerk) * t_1
# v_1 = 1/2 * jerk * t_1 ** 2
v_1 = acceleration ** 2 / (2 * jerk)
# Second period (linear region)
dv = velocity - 2 * v_1
assert dv > 0
t_2 = dv / acceleration
x_2 = v_1 * t_2 + 1/2 * acceleration * t_2 ** 2
v_2 = velocity - v_1
# Third period (deceleration of acceleration)
t_3 = acceleration / jerk
x_3 = (v_2 + acceleration ** 2 / (3 * jerk)) * t_3
v_3 = v_1
# Calculate spin-up distance and time
spin_up_time_s = t_1 + t_2 + t_3
spin_up_distance_mm = x_1 + x_2 + x_3
assert (v_1 + v_2 + v_3 - velocity) < 1e-1, "Calculated velocity is %.4f, desired is %.4f" % (v_1 + v_2 + v_3, velocity)
else:
spin_up_time_s = velocity / acceleration
spin_up_distance_mm = 1/2 * acceleration * spin_up_time_s ** 2
# Add extra spin_up time
spin_up_time_s += self.extra_run_up_time_s
spin_up_distance_mm += self.extra_run_up_time_s * velocity
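# Example (hypothetical values): velocity = 40 mm/s and acceleration = 1000 mm/s^2 give
# spin_up_time_s = 40 / 1000 = 0.04 s and spin_up_distance_mm = 0.5 * 1000 * 0.04^2 = 0.8 mm
# (before the extra run-up time is added).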
# spin_up_distance_mm = 0
spin_up_time_s = max(spin_up_time_s, 0.0001)
self.hardware_controller_list['illumination'].setupTriggering(self.hardware_controller_list['position'].trigger_pin, int(
self.hardware_controller_list['position'].trigger_pulse_width_us), int(spin_up_time_s * 1e6)) # convert to microseconds
# Tell motion stage to offset its positions by these amounts
self.hardware_controller_list['position'].preload_run_up_distance_mm = spin_up_distance_mm
else:
# no delay for other components
self.hardware_controller_list[hardware_controller_name].trigger_start_delay_s = 0
if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
if hardware_controller_name != 'illumination' or linear_segment_index == 0:
if hardware_controller_name == 'illumination' and self.reuse_illumination_sequence:
self.hardware_controller_list[hardware_controller_name].preloadSequence(0)
else:
state_sequence_used = [
self.hardware_controller_list[hardware_controller_name].state_sequence[i] for i in self.frames_to_acquire]
self.hardware_controller_list[hardware_controller_name].preloadSequence(
preload_index, state_sequence=state_sequence_used)
if preload_index < 0 or self.reuse_illumination_sequence:
frames_to_wait_for = self.n_frames_to_acquire # wait for all frames
else:
frames_to_wait_for = 1
# Set trigger frame time based on first pathway TODO: This is a hack
if 'position' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].trigger_frame_time_s[self.hardware_controller_list['camera']
.trigger_pin] = self.hardware_controller_list['position'].state_sequence[0]['common']['frame_time'] * 1e6
# Tell stage to start moving
self.hardware_controller_list['position'].runSequence()
if linear_segment_index == 0:
t_start = time.time()
# Tell illumination to start moving
if self.reuse_illumination_sequence:
self.hardware_controller_list['illumination'].runSequence(
n_acquisitions=1 * self.n_frames_to_acquire)
else:
self.hardware_controller_list['illumination'].runSequence(n_acquisitions=1)
# Wait for frames to be captured
t_frame = time.time()
frame_count = 0
while frame_count < frames_to_wait_for:
if self.total_frame_count + frame_count == frames_to_wait_for:
break
else:
if self.total_frame_count + frame_count == self.hardware_controller_list['camera'].getBufferSizeFrames():
time.sleep(0.01)
if (time.time() - t_frame) > self.acquisition_timeout_s:
print(self.hardware_controller_list['illumination'].response())
raise ValueError('Acquisition timeout (Total frame count: %d, Buffer size: %d, preload index %d, frames to wait for: %d)' % (
self.total_frame_count, self.hardware_controller_list['camera'].getBufferSizeFrames(), preload_index, frames_to_wait_for))
else:
if ((self.total_frame_count + frame_count) % int((self.n_frames) / min(10, self.n_frames_to_acquire))) == 0:
print('Acquired %d of %d frames' % (
self.hardware_controller_list['camera'].getBufferSizeFrames(), self.n_frames_to_acquire))
frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames(
) - self.total_frame_count
self.total_frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames()
t_frame = time.time()
# Get sequence timing information
time.sleep(0.1)
print(self.hardware_controller_list['illumination'].response())
# Wait for hardware to stop
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
self.sequence_timing_dict = {}
# Reset sequences
for hardware_controller_name in self.hardware_controller_list:
if hardware_controller_name != 'camera':
self.hardware_controller_list[hardware_controller_name].sequenceReset()
# Let user know we're finished
print('Finished linear segment %d' % linear_segment_index)
time.sleep(self.segment_delay_s)
t_acq = time.time() - t_start
self.metadata.acquisition_time_s = t_acq
print("Acquisition took %.4f seconds" % (t_acq))
# Call post-acquire functions
dataset = self.postAcquire(dataset=dataset, reset_devices=reset_devices)
# Return
return dataset
def postAcquire(self, dataset=None, reset_devices=True):
"""Post-acquisition steps for resetting hardware and preparing dataset."""
# Stop acquisition
# self.hardware_controller_list['camera'].sequenceStop()
# Parse dataset
if dataset is None:
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Read frames and timestamps from buffer
(self.frame_list, elapsed_frame_time_ms) = self.hardware_controller_list['camera'].readFramesFromBuffer()
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Let user know we're finished
print('Read frames from buffer.')
# Store camera timing in a standardized timing dict
self.sequence_timing_dict = {}
self.sequence_timing_dict['sequence_timing'] = []
for frame_index, frame_time in enumerate(elapsed_frame_time_ms):
timing_dict = {'trigger_number' : 0, 'acquisition_number' : frame_index, 'camera_start_time_us' : frame_time * 1000}
self.sequence_timing_dict['sequence_timing'].append(timing_dict)
# Reset all hardware elements
if reset_devices:
for hardware_controller_name in self.hardware_controller_list:
self.hardware_controller_list[hardware_controller_name].reset()
if self.only_store_first_and_last_position:
for frame_state in self.frame_state_list[1:]:
frame_state['position']['states'] = [frame_state['position']['states'][0], frame_state['position']['states'][-1]]
# Remove repeated illumination patterns and time_sequence_s if we used the same illumination for each pulse
if self.reuse_illumination_sequence:
for frame_state in self.frame_state_list[1:]:
frame_state['time_sequence_s'] = 'see_frame_#1'
frame_state['illumination'] = 'see_frame_#1'
# Illuminate with brightfield to indicate we're Finished
self.hardware_controller_list['illumination'].bf()
self.hardware_controller_list['position'].goToPosition((0,0))
# Save results to a Dataset object
dataset.frame_list = self.frame_list
dataset.frame_state_list = self.frame_state_list
dataset.metadata = self.metadata
dataset.type = 'motion_deblur'
# Return
return dataset
def genMotionPathway(self, n_acquisitions=1, pathway_type='raster', frame_spacing_mm=1.):
'''
This function generates a few example motion pathways.
'''
if pathway_type == 'raster':
pathway = self.genMotionPathwayRaster(self.object_size_mm, self.frame_spacing_mm)
elif (pathway_type == 'linear') or (pathway_type == 'linear_x'):
# predefine linear x sequence
n_frames = int(math.ceil(self.object_size_mm[1] / self.frame_spacing_mm[1]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'x_start': frame_index * self.frame_spacing_mm[1],
'x_end': (frame_index + 1) * self.frame_spacing_mm[1],
'y_start': 0, 'y_end': 0, 'linear_segment_index': 0})
elif pathway_type in ['linear_y']:
# predefine linear y sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': -frame_index * self.frame_spacing_mm[0],
'y_end': -(frame_index + 1) * self.frame_spacing_mm[0],
'x_start': 0, 'x_end': 0, 'linear_segment_index': 0})
elif pathway_type == 'linear_diag':
# predefine linear diagonal sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': frame_index * self.frame_spacing_mm[0],
'y_end': (frame_index + 1) * self.frame_spacing_mm[0],
'x_start': frame_index * self.frame_spacing_mm[0],
'x_end': (frame_index + 1) * self.frame_spacing_mm[0],
'linear_segment_index': 0})
else:
raise ValueError('Pathway type %s is not implemented.' % pathway_type)
# Center the pathway on the midpoint of its bounding box
path_xmin = 1e8
path_ymin = 1e8
path_xmax = -1e8
path_ymax = -1e8
for path in pathway:
path_xmin = min(path_xmin, min([path['x_start'], path['x_end']]))
path_xmax = max(path_xmax, max([path['x_start'], path['x_end']]))
path_ymin = min(path_ymin, min([path['y_start'], path['y_end']]))
path_ymax = max(path_ymax, max([path['y_start'], path['y_end']]))
mean = ((path_ymax + path_ymin) / 2, (path_xmax + path_xmin) / 2)
for path in pathway:
path['x_start'] = path['x_start'] - mean[1]
path['x_end'] = path['x_end'] - mean[1]
path['y_start'] = path['y_start'] - mean[0]
path['y_end'] = path['y_end'] - mean[0]
# Flip pathway if user desired
if self.flip_pathway:
for path in pathway:
path['x_start'] *= -1
path['x_end'] *= -1
path['y_start'] *= -1
path['y_end'] *= -1
position_state_list = []
time_sequence_s = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, position in enumerate(pathway):
# define distance in terms of l1 or l2 distance
distance_l2 = float(np.sqrt((position['x_end'] - position['x_start'])
** 2 + (position['y_end'] - position['y_start']) ** 2))
distance_l1 = float(abs(position['x_end'] - position['x_start']) +
abs(position['y_end'] - position['y_start']))
if self.use_l1_distance_for_motion_calculations:
position['frame_distance'] = int(round(distance_l1 * 1000)) / 1000 # round to nearest um
else:
position['frame_distance'] = int(round(distance_l2 * 1000)) / 1000 # round to nearest um
# Determine number of quantifiable positions in pathway
position['n_blur_positions_frame'] = int(
math.floor(position['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000)))
# Determine necessary velocity
if self.velocity_mm_s is not None:
position['velocity_mm_s'] = self.velocity_mm_s
else:
position['velocity_mm_s'] = self.max_velocity_mm_s # Use fastest speed possible
# Calculate time between frames
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s'] # t = x / v
# Determine camera exposure time for this frame
position['exposure_time_s'] = int(math.floor((self.hardware_controller_list['camera'].calcExposureTimeFromBusyTime(
position['frame_time_s']) - self.exposure_time_pad_s) * 1000)) / 1000 # round to nearest ms
# Determine LED update rate
dx_pixel = position['frame_distance'] / position['n_blur_positions_frame']
dt_pixel_raw = dx_pixel / position['velocity_mm_s']
position['led_update_rate_us'] = math.ceil(dt_pixel_raw * 1e6) # Round up to integer us
# Determine new velocity (ps / update rate)
new_velocity_mm_s = (self.metadata.system.eff_pixel_size_um / 1e3) / (position['led_update_rate_us'] / 1e6)
if self.debug > 0:
print('Reducing velocity to %.4f mm/s from %.4f mm/s to match illumination update rate of %d us' % (new_velocity_mm_s, position['velocity_mm_s'], position['led_update_rate_us']))
position['velocity_mm_s'] = new_velocity_mm_s
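# Example (hypothetical values): eff_pixel_size_um = 0.325 and velocity = 40 mm/s give
# dt_pixel_raw = 0.325e-3 / 40 ~= 8.1 us, rounded up to 9 us; the velocity is then
# reduced to 0.325e-3 / 9e-6 ~= 36.1 mm/s so stage motion and LED updates stay in sync.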
# Update frame time based on velocity
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s']
# Determine number of pixels in exposure time
position['n_blur_positions_exposure'] = math.floor(position['exposure_time_s'] / (position['led_update_rate_us'] / 1e6))
# Determine the distance traveled during the exposure time
position['exposure_distance'] = position['n_blur_positions_exposure'] * position['led_update_rate_us'] / 1e6 * position['velocity_mm_s']
# Store acceleration
position['acceleration_mm_s_2'] = self.motion_acceleration_mm_s_2
# Print information about this pattern
if self.debug > 0:
print('Segment %d, index %d will require %d blur positions per frame (%d during exposure), %.2fms exposure time (%.2fms total frame time), scan %.2fmm (%.2fmm with exposure), move at %.2fmm/s, and update speed %dus' %
(position['linear_segment_index'], frame_index, position['n_blur_positions_frame'],position['n_blur_positions_exposure'], 1000. * position['exposure_time_s'], 1000. * position['frame_time_s'], position['frame_distance'], position['exposure_distance'], position['velocity_mm_s'], position['led_update_rate_us']))
# Check that all blur parameters are valid
assert position['led_update_rate_us'] >= self.max_led_update_rate_us, "LED array update rate (%d us) < max update rate (%d us)" % (
position['led_update_rate_us'], self.max_led_update_rate_us)
assert position['exposure_time_s'] <= self.max_exposure_time_s, "Exposure time (%.3fs) > max_exposure_time_s (%.3f)" % (
position['exposure_time_s'], self.max_exposure_time_s)
assert position['velocity_mm_s'] <= self.max_velocity_mm_s, "Velocity (%.3fs) > max_velocity_mm_s (%.3f)" % (
position['velocity_mm_s'], self.max_velocity_mm_s)
# Lists for this position
single_frame_state_list_position = []
single_frame_time_sequence_s = []
# Determine movement direction
direction = np.asarray((position['y_end'] - position['y_start'],
position['x_end'] - position['x_start']))
direction /= np.linalg.norm(direction)
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = position['frame_time_s']
common_state_dict['led_update_rate_us'] = position['led_update_rate_us']
common_state_dict['linear_segment_index'] = position['linear_segment_index']
common_state_dict['frame_distance'] = position['frame_distance']
common_state_dict['exposure_distance'] = position['exposure_distance']
common_state_dict['velocity'] = position['velocity_mm_s']
common_state_dict['acceleration'] = position['acceleration_mm_s_2']
common_state_dict['n_blur_positions_exposure'] = position['n_blur_positions_exposure']
common_state_dict['position_delta_x_mm'] = direction[1] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
common_state_dict['position_delta_y_mm'] = direction[0] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
# Loop over time points within the exposure
for time_index in range(position['n_blur_positions_exposure']):
time_point_state_list = []
x = position['x_start'] + direction[1] * abs(common_state_dict['position_delta_x_mm']) * time_index
y = position['y_start'] + direction[0] * abs(common_state_dict['position_delta_y_mm']) * time_index
# Append this to list with elements for each interframe time point
time_point_state_list.append({'time_index': time_index,
'value': {'x': x, 'y': y}})
# Append to frame_dict
single_frame_state_list_position.append(time_point_state_list)
single_frame_time_sequence_s.append((time_index + 1) * position['led_update_rate_us'] / 1e6)
# Define illumination sequence
position_state_list.append({'states' : single_frame_state_list_position, 'common' : common_state_dict})
# Define time_sequence
time_sequence_s.append(single_frame_time_sequence_s)
return (position_state_list, time_sequence_s)
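# Example (hypothetical call): genMotionPathway(pathway_type='linear_x', frame_spacing_mm=(0.5, 0.5))
# returns (position_state_list, time_sequence_s): one position state per LED update
# within each frame, plus the corresponding intra-frame time points in seconds.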
def genMotionPathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=None, include_minor_axis=False):
# Hard-code major axis since the rest of the code doesn't respect it for now
_major_axis = 1
# Determine number of measurements
measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)).astype(int) # two components in x and y
# Error checking
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# Offset positions along the major axis by half a frame spacing to center the pathway
offset = [0, 0]
offset[_major_axis] -= frame_spacing_mm[_major_axis] / 2
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (col + 1) + offset[1],
'y_end': frame_spacing_mm[0] * row + offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) + offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': frame_spacing_mm[1] * (col - 1) - offset[1],
'y_end': frame_spacing_mm[0] * row - offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': 0 - offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) - offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
linear_segment_index += 1
print('Generated motion pathway with %d linear segments' % (linear_segment_index))
return pathway
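# Example (hypothetical values): object_size_mm = (1.0, 1.0) and frame_spacing_mm = (0.5, 0.5)
# produce two serpentine rows of two frames each, i.e. 2 linear segments and 4 frames.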
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
plt.title('Pathway (blue: start, red: end)')
plt.gca().invert_yaxis()
def genMotionIlluminationSequenceRandom(self, sequence_count=1,
illumination_sequence=None):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
illumination_state_list = []
linear_segments_processed = {}
# Loop over DPC patterns (frames)
for frame_index, frame_position_dict in enumerate(self.hardware_controller_list['position'].state_sequence):
frame_position_list = frame_position_dict['states']
# Get number of positions in blur kernel from this frame. Divide into subsequences
pattern_count = len(frame_position_list) // sequence_count
# Determine the number of non-zero illumination positions
pattern_count_used = int(round(pattern_count * self.saturation_factor))
# Place patterns at the start of the full sequence
pattern_count_start = 0
# Get linear segment index
current_segment_index = frame_position_dict['common']['linear_segment_index']
if not self.reuse_illumination_sequence or frame_index == 0:
blur_vector_full = []
# Generate several blur vectors
for _ in range(sequence_count):
# Use the provided illumination sequence if given; otherwise generate a
# blur vector according to the configured blur_vector_method
if illumination_sequence:
blur_vector = illumination_sequence
elif self.blur_vector_method == 'strobe':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
elif self.blur_vector_method == 'center':
blur_vector = np.zeros(pattern_count)
# Determine distance traveled within this frame (including readout time)
frame_pixel_count = round(frame_position_dict['common']['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
exposure_pixel_count = round(frame_position_dict['common']['exposure_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
if not frame_pixel_count // 2 < exposure_pixel_count:
print("WARNING: Camera will not expose during center flash (%d pixels, %d pixels used of %d pixels total)" % (frame_pixel_count // 2, exposure_pixel_count, pattern_count))
blur_vector[pattern_count_used] = 1
else:
# Set center position to be on
blur_vector[frame_pixel_count // 2] = 1
elif self.blur_vector_method == 'start_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'start_middle_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'tens':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + 10] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 30] = 1
blur_vector[pattern_count_start + 40] = 1
elif self.blur_vector_method == 'twenties':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start + 0] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 40] = 1
blur_vector[pattern_count_start + 60] = 1
blur_vector[pattern_count_start + 80] = 1
blur_vector[pattern_count_start + 100] = 1
blur_vector[pattern_count_start + 120] = 1
blur_vector[pattern_count_start + 140] = 1
blur_vector[pattern_count_start + 160] = 1
blur_vector[pattern_count_start + 180] = 1
elif self.blur_vector_method == 'quarters':
blur_vector = np.zeros(pattern_count)
"""
.. module:: util
:synopsis: *Utility classes and functions for the :class:`instrupy` package.*
This module contains the common classes and functions used by the instrument models.
The ``Orientation``, ``SphericalGeometry``, ``ViewGeometry``, ``Maneuver``, ``Antenna`` and ``SyntheticDataConfiguration`` classes are
purposed for handling common instrument parameters
"""
from __future__ import division
import json
import numpy as np
import math
from enum import Enum
from numbers import Number
import scipy.constants
import copy
import scipy.interpolate
import metpy.interpolate
from collections import namedtuple
import lowtran
class Entity(object):
"""An entity is an abstract class to aggregate common functionality.
:ivar _id: Unique identifier for this entity.
:vartype _id: str
:ivar _type: Class type description for this entity.
:vartype _type: str
"""
def __init__(self, _id=None, _type="Entity"):
"""Initialize an entity.
:param _id: (default: None)
:paramtype _id: str
:param _type: (default: "Entity")
:paramtype _type: str
"""
self._id = _id
self._type = _type
def to_dict(self):
"""Convert this entity to a JSON-formatted dictionary."""
# extract and copy the python dictionary
json_dict = dict(self.__dict__)
def recursive_normalize(d):
"""Helper function to recursively remove null values and serialize
unserializable objects from dictionary."""
# if not a dictionary, return immediately
if not isinstance(d, dict):
if isinstance(d, Entity): return d.to_dict()
else: return d
# otherwise loop through each key/value pair
for key, value in list(d.items()):
# if value is None remove key
if value is None:
del d[key]
# else if non-seralizable object, manually serialize to json
elif isinstance(value, Entity):
d[key] = value.to_dict()
# else if list, recursively serialize each list element
elif isinstance(value, list):
d[key] = list(map(recursive_normalize, value))
# otherwise recursively call function
else: recursive_normalize(value)
return d
recursive_normalize(json_dict)
# translate special python to json keys: _id to @id, _type to @type
if json_dict.get("_id"): json_dict["@id"] = json_dict.pop("_id")
if json_dict.get("_type"): json_dict["@type"] = json_dict.pop("_type")
return json_dict
def to_json(self, file=None, *args, **kwargs):
"""Serializes this entity to a JSON-formatted string or file."""
if file is None:
# return json string
return json.dumps(self.to_dict(), *args, **kwargs)
else:
# write json file
return json.dump(self.to_dict(), file, *args, **kwargs)
@classmethod
def from_json(cls, json_doc):
"""Parses an entity from a JSON-formatted string, dictionary, or file."""
# convert json string or file to dictionary (if necessary)
if isinstance(json_doc, str):
json_doc = json.loads(json_doc)
elif hasattr(json_doc, 'read'):
json_doc = json.load(json_doc)
# if pre-formatted, return directly
if (json_doc is None or isinstance(json_doc, Number) or isinstance(json_doc, Entity)):
return json_doc
# if list, recursively parse each element and return mapped list
if isinstance(json_doc, list):
return list(map(lambda e: cls.from_json(e), json_doc))
# otherwise use class method to initialize from normalized dictionary
return cls.from_dict(json_doc)
@staticmethod
def from_dict(d):
"""Parses an entity from a normalized JSON dictionary."""
return Entity(_id = d.get("@id", None))
def __eq__(self, other):
"""Overrides the default check if this entity is equal to another by
comparing unique identifiers if available.
"""
# if specified, perform comparison on unique identifier
if self._id is not None:
return self._id == other._id
# otherwise return the default comparison using `is` operator
else:
return self is other
def __ne__(self, other):
"""Overrides the default check if this entity is not equal to another
by comparing unique identifiers if available. (n.b. required for Python 2)
"""
return not self.__eq__(other)
def __hash__(self):
"""Overrides the default hash function by using the unique identifiers
if available."""
# if specified, return the hash of the unique identifier
if self._id is not None:
return hash(self._id)
# otherwise return default hash from superclass
return super(Entity, self).__hash__()
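# Example (a minimal round trip using the helpers above):
#   e = Entity(_id='sensor-1')
#   d = e.to_dict()                      # {'@id': 'sensor-1', '@type': 'Entity'}
#   e2 = Entity.from_json(json.dumps(d))
#   assert e == e2                       # equality compares unique identifiers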
class EnumEntity(str, Enum):
"""Enumeration of recognized types."""
@classmethod
def get(cls, key):
"""Attempts to parse a type from a string, otherwise returns None."""
if isinstance(key, cls):
return key
elif isinstance(key, list):
return list(map(lambda e: cls.get(e), key))
else:
try: return cls(key.upper())
except: return None
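# Example: SyntheticDataConfiguration.InterpolationMethod.get('scipy_linear') returns
# InterpolationMethod.SCIPY_LINEAR (parsing is case-insensitive); unknown keys return None.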
class Constants(object):
""" Collection of various frequently utilized constants. Unless indicated otherwise, the constants
are in S.I. units.
"""
radiusOfEarthInKM = 6378.137 # [km] Nominal equatorial radius of Earth
speedOfLight = scipy.constants.speed_of_light# [meters per second]
GMe = 3.986004418e14*1e-9 # [km^3 s^-2] product of Gravitational constant and Mass of Earth
Boltzmann = scipy.constants.physical_constants['Boltzmann constant'][0]
angularSpeedOfEarthInRadPerSec = 7.292115e-5 # [rad per sec] WGS-84 nominal mean angular velocity of Earth
Planck = scipy.constants.physical_constants['Planck constant'][0]
SunBlackBodyTemperature = 6000.0 # [Kelvin] Sun black body temperature
SolarRadius = 6.95700e8 # [meters] Solar radius
class SyntheticDataConfiguration(Entity):
""" Class to handle configuration of synthetic data.
:ivar sourceFilePaths: List of filepaths of the science-data files in NetCDF format. Each file corresponds to a specific (forecast/analysis) time.
:vartype sourceFilePaths: list, str
:ivar geophysicalVar: Geophysical variable (name as present in the source NetCDF file) to be used for the synthetic data.
:vartype geophysicalVar: str
:ivar interpolMethod: Interpolation method to be employed while spatially interpolating the source data onto the pixel-positions.
:vartype interpolMethod: :class:`instrupy.util.SyntheticDataConfiguration.InterpolationMethod`
:ivar _id: Unique identifier.
:vartype _id: str
"""
class InterpolationMethod(EnumEntity):
""" Enumeration of recognized interpolation methods which can be used for synthetic data production.
"""
SCIPY_LINEAR = "SCIPY_LINEAR"
METPY_LINEAR = "METPY_LINEAR"
def __init__(self, sourceFilePaths=None, geophysicalVar=None, interpolMethod=None, _id=None):
        self.sourceFilePaths = sourceFilePaths
        if(self.sourceFilePaths is not None and not isinstance(self.sourceFilePaths, list)):
            self.sourceFilePaths = [self.sourceFilePaths]
self.geophysicalVar = str(geophysicalVar) if geophysicalVar is not None else None
self.interpolMethod = SyntheticDataConfiguration.InterpolationMethod.get(interpolMethod) if interpolMethod is not None else None
self.interpolators = {"SCIPY_LINEAR": SyntheticDataInterpolator.scipy_linear, "METPY_LINEAR": SyntheticDataInterpolator.metpy_linear} # list of available interpolators
super(SyntheticDataConfiguration, self).__init__(_id, "SyntheticDataConfiguration")
@staticmethod
def from_dict(d):
""" Construct an ``SyntheticDataConfiguration`` object from a dictionary.
:param d: Dictionary containing the synthetic data config specifications.
:paramtype d: dict
:return: ``SyntheticDataConfiguration`` object initialized with the input specifications.
:rtype: :class:`instrupy.util.SyntheticDataConfiguration`
"""
return SyntheticDataConfiguration(sourceFilePaths = d.get("sourceFilePaths", None),
geophysicalVar = d.get("geophysicalVar", None),
interpolMethod = d.get("interpolMethod", None),
_id = d.get("@id", None))
def to_dict(self):
""" Translate the ``SyntheticDataConfiguration`` object to a Python dictionary such that it can be uniquely reconstructed back from the dictionary.
:return: ``SyntheticDataConfiguration`` object as python dictionary
:rtype: dict
"""
syndataconf_dict = {"sourceFilePaths": self.sourceFilePaths,
"geophysicalVar": self.geophysicalVar,
"interpolMethod": self.interpolMethod,
"@id": self._id
}
return syndataconf_dict
def get_interpolator(self):
""" Get the interpolator associated with the configured interpolation method.
:return: Interpolation function.
:rtype: function
"""
interp = self.interpolators.get(self.interpolMethod.value)
if not interp:
print('{} interpolation method was not recognized.'.format(self.interpolMethod.value))
raise ValueError(self.interpolMethod.value)
return interp
def __repr__(self):
return "SyntheticDataConfiguration.from_dict({})".format(self.to_dict())
def __eq__(self, other):
        # The equality test is a simple one which compares the data attributes.
# note that _id data attribute may be different
# @TODO: Make the list of strings comparison (sourceFilePaths variable) insensitive to order of the elements in the list and the case (upper/ lower cases).
if(isinstance(self, other.__class__)):
return (self.sourceFilePaths==other.sourceFilePaths) and (self.geophysicalVar==other.geophysicalVar) and (self.interpolMethod==other.interpolMethod)
else:
return NotImplemented
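# A minimal usage sketch (hypothetical file paths and variable name, not part of the original API):
def _demo_synthetic_data_configuration():
    """Illustrative sketch: construct a SyntheticDataConfiguration from a
    normalized dictionary and round-trip it through to_dict()/from_dict().
    The file names and geophysical variable below are placeholders."""
    specs = {"sourceFilePaths": ["forecast_t0.nc", "forecast_t1.nc"],
             "geophysicalVar": "TMP_P0_L1_GLL0",
             "interpolMethod": "SCIPY_LINEAR"}
    conf = SyntheticDataConfiguration.from_dict(specs)
    assert conf == SyntheticDataConfiguration.from_dict(conf.to_dict())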
class SyntheticDataInterpolator:
""" Class containting functional implementations of interpolators which shall be used to
interpolate geophysical variable data onto pixel (center) positions to produce synthetic observations.
"""
@staticmethod
def scipy_linear(lons, lats, var_data, pixel_center_pos):
""" Execute SciPy linear interpolation on the input data.
:param lons: (degrees) List of longitudes making the base grid.
:paramtype lons: list, float
:param lats: (degrees) List of latitudes making the base grid.
        :paramtype lats: list, float
:param var_data: Geophysical variable data on the base grid.
:paramtype var_data: list, float
:param pixel_center_pos: Pixel center positions (locations to which the interpolation should take place).
List of dictionaries with the dictionary keys as:
1. lon[deg]: Longitude
2. lat[deg]: Latitude
:paramtype pixel_center_pos: list, dict
:returns: Interpolated values at the pixel center positions.
:rtype: list, float
"""
f = scipy.interpolate.interp2d(lons, lats, var_data, kind='linear')
interpl_data = []
for _pix_p in pixel_center_pos:
lon = _pix_p['lon[deg]']
lat = _pix_p['lat[deg]']
interpl_data.append(f(lon, lat)[0]) # [0] is needed to convert from the single element np.array to float
print("Expect inaccuracy around longitude=0 deg")
return interpl_data
@staticmethod
def metpy_linear(lons, lats, var_data, pixel_center_pos):
""" Execute MetPy linear interpolation on the input data.
:param lons: (degrees) List of longitudes making the base grid.
:paramtype lons: list, float
:param lats: (degrees) List of latitudes making the base grid.
        :paramtype lats: list, float
:param var_data: Geophysical variable data on the base grid.
:paramtype var_data: list, float
:param pixel_center_pos: Pixel center positions (locations to which the interpolation should take place).
List of dictionaries with the dictionary keys as:
1. lon[deg]: Longitude
2. lat[deg]: Latitude
:paramtype pixel_center_pos: list, dict
:returns: Interpolated values at the pixel center positions.
:rtype: list, float
"""
# transform input data into format required by the MetPy API
X = np.array(lons)
Y = np.array(lats)
X = X.reshape((np.prod(X.shape),))
Y = Y.reshape((np.prod(Y.shape),))
coords = np.dstack((X,Y))[0]
var_data = np.array(var_data).flatten()
pix_cen_pos = []
for x in pixel_center_pos:
pix_cen_pos.append([x["lon[deg]"],x["lat[deg]"]])
interpl_data = metpy.interpolate.interpolate_to_points(coords, var_data, pix_cen_pos, interp_type='linear')
# metpy.interpolate.interpolate_to_points(coords, var_data, pixel_center_pos, interp_type='linear',
# minimum_neighbors=3, gamma=0.25, kappa_star=5.052,
# search_radius=None, rbf_func='linear', rbf_smooth=0)
return interpl_data
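# A minimal usage sketch (all numbers made up, not part of the original API):
def _demo_scipy_linear_interpolation():
    """Illustrative sketch: interpolate a toy 2x2 geophysical field onto one
    pixel center with the SciPy linear interpolator, showing the expected
    pixel_center_pos dictionary format."""
    lons = [0.0, 1.0]
    lats = [0.0, 1.0]
    var_data = [[0.0, 1.0], [2.0, 3.0]]  # values on the lat/lon grid
    pixel_center_pos = [{"lon[deg]": 0.5, "lat[deg]": 0.5}]
    vals = SyntheticDataInterpolator.scipy_linear(lons, lats, var_data, pixel_center_pos)
    # the bilinear value at the cell center is the mean of the four corners, 1.5
    return vals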
class ReferenceFrame(EnumEntity):
""" Enumeration of recognized reference frames.
:cvar EARTH_CENTERED_INERTIAL: Earth Centered Inertial reference frame.
This is an Earth equator inertial reference frame identical to EarthMJ2000Eq coordinate system used in GMAT.
The nominal x-axis points along the line formed by the intersection of the Earth’s
mean equatorial plane and the mean ecliptic plane (at the J2000 epoch), in the direction
of Aries. The z-axis is normal to the Earth’s mean equator at the J2000 epoch and the
y-axis completes the right-handed system. The mean planes of the ecliptic and equator,
at the J2000 epoch, are computed using IAU-1976/FK5 theory with 1980 update for nutation.
:vartype EARTH_CENTERED_INERTIAL: str
:cvar EARTH_FIXED: Earth Fixed reference frame.
The Earth Fixed reference frame is referenced to the Earth's equator and the prime meridian
and is computed using IAU-1976/FK5 theory. This system is identical to the EarthFixed coordinate
system used in GMAT.
:vartype EARTH_FIXED: str
:cvar NADIR_POINTING: Nadir-pointing reference frame.
The axis of the Nadir-pointing reference frame are defined as follows:
* :math:`\\bf X_{np}` axis: :math:`-({\\bf Z_{np}} \\times {\\bf V})`, where :math:`\\bf V` is the Velocity vector of satellite in EARTH_FIXED frame)
* :math:`\\bf Y_{np}` axis: :math:`({\\bf Z_{np}} \\times {\\bf X_{np}})`
* :math:`\\bf Z_{np}` axis: Aligned to Nadir vector (i.e. the negative of the position vector of satellite in EARTH_FIXED frame)
.. todo:: Verify the claim about position vector and velocity vector in EARTH_FIXED frame.
:vartype NADIR_POINTING: str
:cvar SC_BODY_FIXED: Spacecraft Body Fixed reference frame. The axis of this coordinate system is fixed to the Spacecraft Bus.
:vartype SC_BODY_FIXED: str
:cvar SENSOR_BODY_FIXED: Sensor Body Fixed reference frame. The axis of this coordinate system is fixed to the Sensor.
:vartype SENSOR_BODY_FIXED: str
"""
EARTH_CENTERED_INERTIAL = "EARTH_CENTERED_INERTIAL"
EARTH_FIXED = "EARTH_FIXED"
NADIR_POINTING = "NADIR_POINTING"
SC_BODY_FIXED = "SC_BODY_FIXED"
SENSOR_BODY_FIXED = "SENSOR_BODY_FIXED"
class Orientation(Entity):
""" Class to store and handle orientation. Orientation is parameterized as intrinsic rotations specified by Euler angles and sequence
with respect to the user-specified reference frame. The definition of the Euler angle rotation is identical to the
one used in the orbitpy->propcov->extern->gmatutil->util->AttitudeUtil, AttitudeConversionUtility C++ classes.
    An Euler sequence of 123 implies the following rotation: R = R3.R2.R1, where Ri is the rotation matrix about the ith axis.
A positive angle corresponds to an anti-clockwise rotation about the respective axis.
Each rotation matrix rotates the coordinate system (not the vector).
See:
* https://mathworld.wolfram.com/RotationMatrix.html
    :ivar ref_frame: Reference frame. Default is "NADIR_POINTING".
:vartype ref_frame: str
:ivar euler_angle1: (deg) Rotation angle corresponding to the first rotation. Default is 0.
:vartype euler_angle1: float
:ivar euler_angle2: (deg) Rotation angle corresponding to the second rotation. Default is 0.
:vartype euler_angle2: float
:ivar euler_angle3: (deg) Rotation angle corresponding to the third rotation. Default is 0.
:vartype euler_angle3: float
:ivar euler_seq1: Axis-number corresponding to the first rotation. Default is 1.
:vartype euler_angle1: int
:ivar euler_seq2: Axis-number corresponding to the second rotation. Default is 2.
:vartype euler_angle2: int
:ivar euler_seq3: Axis-number corresponding to the third rotation. Default is 3.
:vartype euler_angle3: int
:ivar _id: Unique identifier.
:vartype _id: str
"""
def __init__(self, ref_frame="NADIR_POINTING", euler_angle1=0, euler_angle2=0, euler_angle3=0, euler_seq1=int(1), euler_seq2=int(2), euler_seq3=int(3), _id=None):
self.ref_frame = ReferenceFrame.get(ref_frame)
self.euler_angle1 = float(euler_angle1)%360
self.euler_angle2 = float(euler_angle2)%360
self.euler_angle3 = float(euler_angle3)%360
self.euler_seq1 = euler_seq1
self.euler_seq2 = euler_seq2
self.euler_seq3 = euler_seq3
super(Orientation, self).__init__(_id, "Orientation")
'''
def get_pointing_vector_angle_wrt_input_vector(self, vec):
""" Get the angle between the pointing-vector and an input vector. This function may be applied to calculate the off-nadir angle of the
instrument pointing. The pointing axis is assumed to be aligned along the z-axis SENSOR_BODY_FIXED frame.
:param vec: Input vector.
:paramtype vec: array_like, shape (3, 1), float
:return: Angle between the pointing vector and the input vector
:rtype: float
"""
pointing_axis_sensor_frame = np.array([0,0,1]) # assumed pointing axis in the SENSOR_BODY_FIXED frame
'''
@staticmethod
def get_rotation_matrix(axis, angle_deg):
""" Get the rotation matrix corresponding to an input rotation axis (x or y or z) and rotation angle.
TODO: Write unittest for this function.
:param axis: Rotation axis index. Must be 1 or 2 or 3.
:paramtype axis: int
:param angle_deg: Rotation angle in degrees.
:paramtype angle_deg: float
:return: 3x3 rotation matrix.
:rtype: np.array, shape (3,3), float
"""
angle = np.deg2rad(angle_deg)
if axis == 1:
return np.array([[1, 0, 0],
[0, np.cos(angle), np.sin(angle)],
[0, -np.sin(angle), np.cos(angle)]
])
if axis == 2:
return np.array([[np.cos(angle), 0, -np.sin(angle)],
[ 0, 1, 0],
[np.sin(angle), 0, np.cos(angle)]
])
        if axis == 3:
            return np.array([[ np.cos(angle), np.sin(angle), 0],
                             [-np.sin(angle), np.cos(angle), 0],
                             [ 0,             0,             1]
                            ])
        raise ValueError("Rotation axis index must be 1, 2 or 3.")
class Convention(EnumEntity):
""" Enumeration of recognized orientation conventions with which an object can be initialized. The rotations below can be specified with respect to
any of the reference frames given in :class:`instrupy.util.ReferenceFrame`.
:cvar XYZ: Rotations about the X, Y and Z axis in the order 123.
:vartype XYZ: str
        :cvar REF_FRAME_ALIGNED: Aligned with respect to the underlying reference frame. Identity rotation matrix.
:vartype REF_FRAME_ALIGNED: str
:cvar SIDE_LOOK: Rotation about the Y axis only.
:vartype SIDE_LOOK: str
:cvar EULER: Rotation according to the specified Euler angles and sequence.
:vartype EULER: str
"""
XYZ = "XYZ"
REF_FRAME_ALIGNED = "REF_FRAME_ALIGNED"
SIDE_LOOK = "SIDE_LOOK"
EULER = "EULER"
@classmethod
def from_sideLookAngle(cls, ref_frame="NADIR_POINTING", side_look_angle=0, _id=None):
""" Return :class:`Orientation` object constructed from the side-look angle.
        :param ref_frame: Reference frame. Default is "NADIR_POINTING".
:paramtype ref_frame: str
:param side_look_angle: (deg) Side look angle. A positive angle corresponds to anti-clockwise rotation applied around the y-axis. Default is 0.
:paramtype side_look_angle: float
:param _id: Unique identifier.
:paramtype _id: str
:return: Corresponding ``Orientation`` object.
:rtype: :class:`instrupy.util.Orientation`
"""
return Orientation(ref_frame, 0.0, side_look_angle, 0.0, 1,2,3,_id)
@classmethod
def from_XYZ_rotations(cls, ref_frame="NADIR_POINTING", x_rot=0, y_rot=0, z_rot=0, _id = None):
""" Return :class:`Orientation` object constructed from the user-specified XYZ rotation angles with
the sequence=123.
        :param ref_frame: Reference frame. Default is "NADIR_POINTING".
:paramtype ref_frame: str
        :param x_rot: (deg) Rotation about X-axis. Default is 0.
        :paramtype x_rot: float
        :param y_rot: (deg) Rotation about Y-axis. Default is 0.
        :paramtype y_rot: float
        :param z_rot: (deg) Rotation about Z-axis. Default is 0.
        :paramtype z_rot: float
:param _id: Unique identifier.
:paramtype _id: str
"""
return Orientation(ref_frame, x_rot, y_rot, z_rot, 1,2,3,_id)
@staticmethod
def from_dict(d):
"""Parses a ``Orientation`` object from the input dictionary.
:param d: Dictionary containing the orientation specifications.
:paramtype d: dict
:return: ``Orientation`` object initialized with the input specifications.
:rtype: :class:`instrupy.util.Orientation`
"""
orien_conv = Orientation.Convention.get(d.get("convention", None))
ref_frame = ReferenceFrame.get(d.get("referenceFrame", "NADIR_POINTING")).value # default reference frame is NADIR_POINTING
if(orien_conv == "XYZ"):
return Orientation.from_XYZ_rotations(ref_frame=ref_frame, x_rot=d.get("xRotation", 0), y_rot=d.get("yRotation", 0), z_rot=d.get("zRotation", 0), _id = d.get("@id", None))
elif(orien_conv == "SIDE_LOOK"):
return Orientation.from_sideLookAngle(ref_frame=ref_frame, side_look_angle=d.get("sideLookAngle", 0), _id=d.get("@id", None))
elif(orien_conv == "REF_FRAME_ALIGNED"):
return Orientation.from_sideLookAngle(ref_frame=ref_frame, side_look_angle=0, _id=d.get("@id", None))
elif(orien_conv == "EULER"):
return Orientation(ref_frame=ref_frame, euler_angle1=d.get("eulerAngle1", 0), euler_angle2=d.get("eulerAngle2", 0),
euler_angle3=d.get("eulerAngle3", 0), euler_seq1=d.get("eulerSeq1", 1), euler_seq2=d.get("eulerSeq2", 2),
euler_seq3=d.get("eulerSeq3", 3), _id=d.get("@id", None))
else:
raise Exception("Invalid or no Orientation convention specification")
def to_tuple(self): # TODO: remove this function
""" Return data members of the instance as a tuple.
:return: ``Orientation`` object data attributes as namedtuple.
:rtype: namedtuple, (str, int, int, int, float, float, float)
"""
orientation = namedtuple("orientation", ["ref_frame", "euler_seq1", "euler_seq2", "euler_seq3", "euler_angle1", "euler_angle2", "euler_angle3"])
return orientation(self.ref_frame, self.euler_seq1, self.euler_seq2, self.euler_seq3, self.euler_angle1, self.euler_angle2, self.euler_angle3)
def to_dict(self):
""" Translate the ``Orientation`` object to a Python dictionary such that it can be uniquely reconstructed back from the dictionary.
:return: ``Orientation`` object as python dictionary.
:rtype: dict
"""
orien_dict = {
"referenceFrame": self.ref_frame.value,
"convention": "EULER",
"eulerAngle1": self.euler_angle1,
"eulerAngle2": self.euler_angle2,
"eulerAngle3": self.euler_angle3,
"eulerSeq1": self.euler_seq1,
"eulerSeq2": self.euler_seq2,
"eulerSeq3": self.euler_seq3,
"@id": self._id
}
return orien_dict
def __repr__(self):
if isinstance(self._id, str):
return "Orientation(ref_frame='{}',euler_angle1={},euler_angle2={},euler_angle3={},euler_seq1={},euler_seq2={},euler_seq3={},_id='{}')".format(self.ref_frame, self.euler_angle1, self.euler_angle2, self.euler_angle3,
self.euler_seq1, self.euler_seq2, self.euler_seq3, self._id)
else:
return "Orientation(ref_frame='{}',euler_angle1={},euler_angle2={},euler_angle3={},euler_seq1={},euler_seq2={},euler_seq3={},_id={})".format(self.ref_frame, self.euler_angle1, self.euler_angle2, self.euler_angle3,
self.euler_seq1, self.euler_seq2, self.euler_seq3, self._id)
def __eq__(self, other):
        # The equality test is a simple one which compares the data attributes. It does not cover complex cases where the data members may be unequal, but
# the Orientation is physically the same.
# note that _id data attribute may be different
if(isinstance(self, other.__class__)):
return (self.ref_frame==other.ref_frame) and (self.euler_angle1==other.euler_angle1) and (self.euler_angle2==other.euler_angle2) and (self.euler_angle3==other.euler_angle3) \
and (self.euler_seq1==other.euler_seq1) and (self.euler_seq2==other.euler_seq2) and (self.euler_seq3==other.euler_seq3)
else:
return NotImplemented
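# A minimal usage sketch (hypothetical helper, not part of the original API):
def _demo_orientation_rotation_matrix():
    """Illustrative sketch: assemble the full rotation matrix R = R3.R2.R1
    implied by an Orientation with the default 1-2-3 Euler sequence, using the
    per-axis helper, and check orthogonality."""
    orien = Orientation.from_XYZ_rotations(ref_frame="NADIR_POINTING", x_rot=10, y_rot=20, z_rot=30)
    R1 = Orientation.get_rotation_matrix(orien.euler_seq1, orien.euler_angle1)
    R2 = Orientation.get_rotation_matrix(orien.euler_seq2, orien.euler_angle2)
    R3 = Orientation.get_rotation_matrix(orien.euler_seq3, orien.euler_angle3)
    R = R3 @ R2 @ R1  # rotates the coordinate system, not the vector
    assert np.allclose(R @ R.T, np.identity(3))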
class SphericalGeometry(Entity):
""" Class to handle spherical geometries (spherical polygons and circles) which define an closed angular space of interest.
The spherical geometry is maintained internally via vector of cone and clock angles defined in the SENSOR_BODY_FIXED frame with
the Z-axis as the pointing axis. This can be paired with an Orientation object (which describes the orientation of the sensor (hence the SENSOR_BODY_FIXED frame)
with respect to a reference frame) to obtain the position of the spherical geometry in any desired reference frame.
:ivar shape: Shape of the spherical geometry. Accepted values are "CIRCULAR", "RECTANGULAR" or "CUSTOM".
:vartype shape: str
:ivar cone_angle_vec: (deg) Array of cone angles measured from +Z sensor axis (pointing axis). If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector describing a point on unit sphere, then the
cone angle for the point is :math:`\\pi/2 - \\sin^{-1}zP`.
:vartype cone_angle_vec: list, float
:ivar clock_angle_vec: (deg) Array of clock angles (right ascensions) measured anti-clockwise from the +X-axis. If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector
describing a point on unit sphere, then the clock angle for the point is :math:`atan2(yP,xP)`.
:vartype clock_angle_vec: list, float
:ivar diameter: (deg) Spherical circular (about the sensor Z axis) diameter (only for CIRCULAR shape).
:vartype diameter: float
    :ivar angle_height: (deg) Spherical rectangular geometry angular height (about sensor X axis) (only for *RECTANGULAR* shape).
                        Corresponds to along-track angular extent if the sensor frame is aligned to the *NADIR_POINTING* frame.
    :vartype angle_height: float
    :ivar angle_width: (deg) Spherical rectangular geometry angular width (about sensor Y axis) (only for *RECTANGULAR* shape).
                       Corresponds to cross-track angular extent if the sensor frame is aligned to the *NADIR_POINTING* frame.
:vartype angle_width: float
:param _id: Unique identifier.
:paramtype _id: str
.. note:: :code:`cone_angle_vec[0]` ties to :code:`clock_angle_vec[0]`, and so on. Except for the case of *CIRCULAR* shape, in which we
have only one cone angle (:code:`cone_angle_vec[0] = 1/2 diameter`) and no corresponding clock angle.
"""
class Shape(EnumEntity):
"""Enumeration of recognized SphericalGeometry shapes.
:cvar CIRCULAR: Circular shape definition, characterized by the radius of the circle around the Z-axis.
:vartype CIRCULAR: str
:cvar RECTANGULAR: Rectangular spherical polygon definition, characterized by angular width (about Y-axis) and angular height (about X-axis).
:vartype RECTANGULAR: str
:cvar CUSTOM: Custom polygon definition, where an arbitrary number of cone, clock angles
denoting the vertices of the spherical polygon can be specified.
:vartype CUSTOM: str
"""
CIRCULAR = "CIRCULAR"
RECTANGULAR = "RECTANGULAR"
CUSTOM = "CUSTOM"
def __init__(self, shape=None, cone_angle_vec=None, clock_angle_vec=None, _id=None):
if(cone_angle_vec is not None):
if(isinstance(cone_angle_vec, list)):
self.cone_angle_vec = list(map(float, cone_angle_vec))
self.cone_angle_vec = [x%360 for x in self.cone_angle_vec]
else:
self.cone_angle_vec = [float(cone_angle_vec)%360]
else:
self.cone_angle_vec = None
if(clock_angle_vec is not None):
if(isinstance(clock_angle_vec, list)):
self.clock_angle_vec = list(map(float, clock_angle_vec))
self.clock_angle_vec = [x%360 for x in self.clock_angle_vec]
else:
self.clock_angle_vec = [float(clock_angle_vec)%360]
else:
self.clock_angle_vec = None
self.shape = SphericalGeometry.Shape.get(shape) if shape is not None else None
self.diameter = None
self.angle_height = None
self.angle_width = None
if(self.shape==SphericalGeometry.Shape.CIRCULAR):
self.diameter = 2 * self.cone_angle_vec[0]
self.angle_height = self.diameter
self.angle_width = self.diameter
elif(self.shape==SphericalGeometry.Shape.RECTANGULAR):
[self.angle_height, self.angle_width] = SphericalGeometry.get_rect_poly_specs_from_cone_clock_angles(self.cone_angle_vec, self.clock_angle_vec)
super(SphericalGeometry, self).__init__(_id, "SphericalGeometry")
@staticmethod
def from_dict(d):
"""Parses spherical geometry specifications from a normalized JSON dictionary.
:param d: Dictionary with the spherical geometry specifications.
:paramtype d: dict
:return: Spherical geometry object
:rtype: :class:`instrupy.util.SphericalGeometry`
"""
shape = SphericalGeometry.Shape.get(d.get("shape", None))
if(shape == "CIRCULAR"):
sph_geom_dict = SphericalGeometry.from_circular_specs(d.get("diameter", None), d.get("@id", None))
elif(shape == "RECTANGULAR"):
sph_geom_dict = SphericalGeometry.from_rectangular_specs(d.get("angleHeight", None), d.get("angleWidth", None), d.get("@id", None))
elif(shape == "CUSTOM"):
sph_geom_dict = SphericalGeometry.from_custom_specs(d.get("customConeAnglesVector", None), d.get("customClockAnglesVector", None), d.get("@id", None))
else:
raise Exception("Invalid spherical geometry shape specified.")
return sph_geom_dict
def to_dict(self):
""" Translate the ``SphericalGeometry`` object to a Python dictionary such that it can be uniquely reconstructed back from the dictionary.
:return: ``SphericalGeometry`` object as python dictionary.
:rtype: dict
"""
if self.shape==SphericalGeometry.Shape.CIRCULAR:
sph_geom_dict = {"shape": "CIRCULAR", "diameter": self.diameter, "@id": self._id}
elif self.shape==SphericalGeometry.Shape.RECTANGULAR:
sph_geom_dict = {"shape": "RECTANGULAR", "angleHeight": self.angle_height, "angleWidth": self.angle_width, "@id": self._id}
elif self.shape==SphericalGeometry.Shape.CUSTOM:
sph_geom_dict = {"shape": "CUSTOM",
"customConeAnglesVector": "[" + ','.join(map(str, self.cone_angle_vec)) + "]",
"customClockAnglesVector": "[" + ','.join(map(str, self.clock_angle_vec)) + "]",
"@id": self._id
}
else:
sph_geom_dict = None
return sph_geom_dict
def __eq__(self, other):
        # The equality test is a simple one which compares the data attributes. It does not cover complex cases where the data members may be unequal, but
# the spherical shape is physically the same.
# note that _id data attribute may be different
if(isinstance(self, other.__class__)):
            return (self.shape==other.shape) and (self.diameter==other.diameter) and (self.angle_width==other.angle_width) and (self.angle_height==other.angle_height) and \
(self.cone_angle_vec==other.cone_angle_vec) and (self.clock_angle_vec==other.clock_angle_vec)
else:
return NotImplemented
@classmethod
def from_custom_specs(cls, cone_angle_vec=None, clock_angle_vec=None, _id=None):
""" Return corresponding :class:`instrupy.util.SphericalGeometry` object from input cone and clock angles.
:param cone_angle_vec: (deg) Array of cone angles measured from +Z sensor axis. If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector describing a point on unit sphere, then the
cone angle for the point is :math:`\\pi/2 - \\sin^{-1}zP`.
:paramtype cone_angle_vec: list, float
:param clock_angle_vec: (deg) Array of clock angles (right ascensions) measured anti-clockwise from the + X-axis. If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector
describing a point on the unit sphere, then the clock angle for the point is :math:`atan2(yP,xP)`.
:paramtype clock_angle_vec: list, float
:param _id: Unique identifier.
:paramtype _id: str
:return: Corresponding ``SphericalGeometry`` object
:rtype: :class:`instrupy.util.SphericalGeometry`
.. note:: :code:`cone_angle_vec[0]` ties to :code:`clock_angle_vec[0]`, and so on. Except for the case of *CIRCULAR* shaped FOV, in which we
have only one cone angle (:code:`cone_angle_vec[0] = 1/2 diameter`) and no corresponding clock angle.
"""
if(cone_angle_vec):
if(not isinstance(cone_angle_vec, list)):
cone_angle_vec = [cone_angle_vec]
else:
raise Exception("No cone angle vector specified!")
if(clock_angle_vec):
if(not isinstance(clock_angle_vec, list)):
clock_angle_vec = [clock_angle_vec]
        if(any(x < 0 or x > 90 for x in cone_angle_vec)):
raise Exception("CUSTOM cone angles specification must be in the range 0 deg to 90 deg.")
if(len(cone_angle_vec) == 1 and (clock_angle_vec is not None)):
raise Exception("With only one cone angle specified, there should be no clock angles specified.")
if(not(len(cone_angle_vec) == 1 and (clock_angle_vec is None))):
if(len(cone_angle_vec) != len(clock_angle_vec)):
raise Exception("With more than one cone angle specified, the length of cone angle vector should be the same as length of the clock angle vector.")
return SphericalGeometry("CUSTOM", cone_angle_vec, clock_angle_vec, _id)
@classmethod
def from_circular_specs(cls, diameter=None, _id=None):
""" Convert input circular specs to cone, clock angles and return corresponding :class:`instrupy.util.SphericalGeometry` object.
:param diameter: (deg) Diameter of the circle.
:paramtype diameter: float
:param _id: Unique identifier
:paramtype _id: str
:return: Corresponding ``SphericalGeometry`` object
:rtype: :class:`instrupy.util.SphericalGeometry`
"""
if diameter is None:
raise Exception("Please specify diameter of the CIRCULAR fov.")
if(diameter < 0 or diameter > 180):
raise Exception("Specified diameter of CIRCULAR fov must be within the range 0 deg to 180 deg")
return SphericalGeometry("CIRCULAR", 0.5*diameter, None, _id)
@classmethod
def from_rectangular_specs(cls, angle_height=None, angle_width=None, _id=None):
""" Convert the angle_height and angle_width rectangular specs to clock, cone angles and return corresponding :class:`instrupy.util.SphericalGeometry` object.
:param angle_height: (deg) Angular height (about sensor X axis). Corresponds to along-track FOV if sensor is aligned to *NADIR_POINTING* frame.
:paramtype angle_height: float
:param angle_width: (deg) Angular width (about sensor Y axis). Corresponds to cross-track FOV if sensor is aligned to *NADIR_POINTING* frame.
:paramtype angle_width: float
:param _id: Unique identifier
:paramtype _id: str
:return: Corresponding ``SphericalGeometry`` object
:rtype: :class:`instrupy.util.SphericalGeometry`
"""
if(angle_height is None or angle_width is None):
raise Exception("Please specify the angle_height and angle_width for the RECTANGULAR fov.")
if(angle_height < 0 or angle_height > 180 or angle_width < 0 or angle_width > 180):
raise Exception("Specified angle_height and angle_width of the RECTANGULAR fov must be within the range 0 deg to 180 deg")
angle_height = np.deg2rad(angle_height)
angle_width = np.deg2rad(angle_width)
cosCone = np.cos(angle_height/2.0)*np.cos(angle_width/2.0)
cone = np.arccos(cosCone)
sinClock = np.sin(angle_height/2.0) / np.sin(cone)
clock = np.arcsin(sinClock)
cone = np.rad2deg(cone)
clock = np.rad2deg(clock)
cone_angle_vec = [cone, cone, cone, cone]
clock_angle_vec = [clock, 180.0-clock, 180.0+clock, -clock]
return SphericalGeometry("RECTANGULAR", cone_angle_vec, clock_angle_vec, _id)
def get_cone_clock_fov_specs(self):
""" Function to the get the cone and clock angle vectors from the respective ``SphericalGeometry`` object.
:return: Cone, Clock angles in degrees
:rtype: list, float
"""
return [self.cone_angle_vec, self.clock_angle_vec]
@staticmethod
def get_rect_poly_specs_from_cone_clock_angles(cone_angle_vec, clock_angle_vec):
""" Function to get the rectangular specifications (angle_height and angle_width), from input clock, cone angle vectors.
:param cone_angle_vec: (deg) Array of cone angles measured from +Z sensor axis. If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector describing a point on unit sphere, then the
cone angle for the point is :math:`\\pi/2 - \\sin^{-1}zP`.
:paramtype cone_angle_vec: list, float
:param clock_angle_vec: (deg) Array of clock angles (right ascensions) measured anti-clockwise from the + X-axis. If (:math:`xP`, :math:`yP`, :math:`zP`) is a unit vector
describing a point on unit sphere, then the clock angle for the point is :math:`atan2(yP,xP)`.
:paramtype clock_angle_vec: list, float
:return: angle_height and angle_width in degrees
:rtype: list, float
.. todo:: Make sure selected clock angle is from first quadrant.
"""
        # Check if the instance does correspond to a rectangular shape.
# Length of cone angle vector and clock angle vector must be 4.
if(len(cone_angle_vec) != 4) or (len(clock_angle_vec) != 4):
raise Exception("This SphericalGeometry instance does not correspond to a rectangular shape.")
# Check that all elements in the cone angle vector are the same value.
if(len(set(cone_angle_vec))!= 1):
raise Exception("This SphericalGeometry instance does not correspond to a rectangular shape.")
# The elements of the clock angle vector satisfy the following relationship: [theta, 180-theta, 180+theta, 360-theta]
# in case of rectangular shape. Check for this relationship.
if(not math.isclose(clock_angle_vec[3],(360-clock_angle_vec[0])) or not math.isclose(clock_angle_vec[1], (180 - clock_angle_vec[0])) or not math.isclose(clock_angle_vec[2], (180 + clock_angle_vec[0]))):
raise Exception("This SphericalGeometry instance does not correspond to a rectangular shape.")
theta = np.deg2rad(cone_angle_vec[0])
omega = np.deg2rad(clock_angle_vec[0])
alpha = np.arcsin(np.sin(theta)*np.sin(omega))
beta = np.arccos(np.cos(theta)/np.cos(alpha))
angle_height = 2*np.rad2deg(alpha)
        angle_width = 2*np.rad2deg(beta)
        return [angle_height, angle_width]
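# A minimal usage sketch (hypothetical helper, not part of the original API):
def _demo_rectangular_sph_geom_roundtrip():
    """Illustrative sketch: a RECTANGULAR spherical geometry converted to
    cone/clock angles and back recovers the original angular height and
    width."""
    sph = SphericalGeometry.from_rectangular_specs(angle_height=10, angle_width=30)
    [h, w] = SphericalGeometry.get_rect_poly_specs_from_cone_clock_angles(
        sph.cone_angle_vec, sph.clock_angle_vec)
    assert abs(h - 10) < 1e-9 and abs(w - 30) < 1e-9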
"""Toolkit for exploratory work regarding the polarization transfer coefficients
analyzed in Heyvaerts et al 2013.
Heyvaerts' "f" variable is usually called r_V or rho_V by other authors. The
variable "h" is usually called r_Q or rho_Q.
"""
import numpy as np
from pwkit import cgs
from pwkit.numutil import broadcastize, parallel_quad
from scipy.integrate import quad
M3_C3 = cgs.me**3 * cgs.c**3
FOUR_PI_M3_C3 = 4 * cgs.pi * M3_C3
DEFAULT_S = 10.
DEFAULT_THETA = 0.5
# Set this True to override safety checks for incompletely implemented physics.
# Stands for "I know what I'm doing."
IKWID = False
# Bessel function fun. Scipy names second-kind Bessels as Y_v(x); we follow
# Heyvaerts and use N_v(x).
from scipy.special import jv as jv_scipy, jvp as jvp_scipy, yv as nv_scipy, yvp as nvp_scipy, \
kv as kv_scipy, iv as iv_scipy
def lv(nu, x):
"""Similar to a modified Bessel function of the second kind, but not the
same.
"""
return 0.5 * np.pi * (iv_scipy(-nu, x) + iv_scipy(nu, x)) / np.sin(nu * np.pi)
def jv_nicholson(sigma, x):
"""Nicholson's approximation J_sigma(x), for x somewhat smaller than sigma.
Equations 94, 95.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return kv_scipy(1./3, g) * np.sqrt(2 * (sigma - x) / (3 * x)) / np.pi
def nv_nicholson(sigma, x):
"""Nicholson's approximation N_sigma(x), for x somewhat smaller than sigma.
Equations 94, 95.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return -lv(1./3, g) * np.sqrt(2 * (sigma - x) / x) / np.pi
def jvp_nicholson(sigma, x):
"""Nicholson's approximation J'_sigma(x), for x somewhat smaller than sigma.
Equations 94, 96.
The derivative approximations do not converge nearly as well as the
non-derivatives.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return kv_scipy(2./3, g) * 2 * (sigma - x) / (3**0.5 * np.pi * x)
def nvp_nicholson(sigma, x):
"""Nicholson's approximation N'_sigma(x), for x somewhat smaller than sigma.
Equations 94, 96.
The derivative approximations do not converge nearly as well as the
non-derivatives.
"""
g = (2 * (sigma - x))**1.5 / (3 * np.sqrt(x))
return lv(2./3, g) * 2 * (sigma - x) / (np.pi * x)
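# A small consistency sketch (added illustration; the cut values below are made up):
def _check_nicholson_approximation():
    """Illustrative sketch: compare the Nicholson approximation for J_sigma(x)
    against direct scipy evaluation in a regime where both are usable (sigma a
    bit above x, neither extreme)."""
    sigma, x = 50., 48.
    rel_err = abs(jv_nicholson(sigma, x) / jv_scipy(sigma, x) - 1.)
    return rel_err  # expected to be small (roughly percent level) here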
# coefficients from http://dlmf.nist.gov/10.41#ii, 10.41.10 etc. u0 = v0 = 1.
# Inspired by Heyvaerts but functions from http://dlmf.nist.gov/10.19
_debye_u1_coeffs = np.array([-5., 0, 3, 0]) / 24
_debye_u2_coeffs = np.array([385., 0, -462, 0, 81, 0, 0]) / 1152
_debye_u3_coeffs = np.array([-425425., 0, 765765, 0, -369603, 0, 30375, 0, 0, 0]) / 414720
_debye_v1_coeffs = np.array([7., 0, -9, 0]) / 24
_debye_v2_coeffs = np.array([-455., 0, 594, 0, -135, 0, 0]) / 1152
_debye_v3_coeffs = np.array([475475., 0, -883575, 0, 451737, 0, -42525, 0, 0, 0]) / 414720
def jv_debye(sigma, x):
"""The Debye expansion of J_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. + # m=0 term
np.polyval(_debye_u1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_u2_coeffs, cotha) / sigma**2 + # m=2
np.polyval(_debye_u3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (tanha - alpha)) * s / np.sqrt(2 * np.pi * sigma * tanha)
def nv_debye(sigma, x):
"""The Debye expansion of N_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. - # m=0 term; note alternating signs
np.polyval(_debye_u1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_u2_coeffs, cotha) / sigma**2 - # m=2
np.polyval(_debye_u3_coeffs, cotha) / sigma**3) # m=3
return -np.exp(sigma * (alpha - tanha)) * s / np.sqrt(0.5 * np.pi * sigma * tanha)
def jvp_debye(sigma, x):
"""The Debye expansion of J'_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. + # m=0 term
np.polyval(_debye_v1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_v2_coeffs, cotha) / sigma**2 + # m=2
np.polyval(_debye_v3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (tanha - alpha)) * s * np.sqrt(np.sinh(2 * alpha) / (4 * np.pi * sigma))
def nvp_debye(sigma, x):
"""The Debye expansion of N'_sigma(x), used with large x and sigma."""
alpha = np.arccosh(sigma / x)
tanha = np.tanh(alpha)
cotha = 1. / tanha
s = (1. - # m=0 term; note alternating signs
np.polyval(_debye_v1_coeffs, cotha) / sigma + # m=1
np.polyval(_debye_v2_coeffs, cotha) / sigma**2 - # m=2
np.polyval(_debye_v3_coeffs, cotha) / sigma**3) # m=3
return np.exp(sigma * (alpha - tanha)) * s * np.sqrt(np.sinh(2 * alpha) / (np.pi * sigma))
NICHOLSON_SIGMA_CUT = 30. # made up
NICHOLSON_REL_TOL = 0.01 # made up
DEBYE_SIGMA_CUT = 30. # made up
DEBYE_REL_TOL = 0.1 # made up
@broadcastize(2)
def jv(sigma, x):
"Bessel function of first kind."
r = jv_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = jv_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = jv_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('jv nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def nv(sigma, x):
"Bessel function of second kind. AKA N_v"
r = nv_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = nv_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = nv_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('nv nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def jvp(sigma, x):
"First derivative of Bessel function of first kind."
r = jvp_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = jvp_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = jvp_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('jvp nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def nvp(sigma, x):
"First derivative of Bessel function of second kind. AKA N_v"
r = nvp_scipy(sigma, x)
w = (sigma > NICHOLSON_SIGMA_CUT) & ((sigma - x) / sigma < NICHOLSON_REL_TOL)
r[w] = nvp_nicholson(sigma[w], x[w])
w = (sigma > DEBYE_SIGMA_CUT) & (np.abs(np.cbrt(sigma) / (sigma - x)) < DEBYE_REL_TOL)
r[w] = nvp_debye(sigma[w], x[w])
nf = ~np.isfinite(r)
#if nf.sum(): print('nvp nf', sigma, x)
r[nf] = 0.
return r
@broadcastize(2)
def jvpnv_heyvaerts_debye(sigma, x):
"""Product of the first derivative of the Bessel function of the first kind
and the (not a derivative of) the Bessel function of the second kind, with
Heyvaerts' Debye approximation, used with large x and sigma .
Heyvaerts presents an expansion that makes these computations more
tractable at extreme values, where J_v is very small and N_v is very big.
"""
s2 = sigma**2
x2 = x**2
    A1 = 0.125 - 5 * s2 / (24 * (s2 - x2))  # same A1 coefficient as in f_nr_element/h_nr_element
A2 = 3./128 - 77 * s2 / (576 * (s2 - x2)) + 385 * s2**2 / (3456 * (s2 - x2)**2)
xA1p = -5 * s2 * x2 / (12 * (s2 - x2)**2)
return -1 / (np.pi * x) * (
1 +
x2 / (2 * (s2 - x2)**1.5) +
(6 * A2 + xA1p - A1**2) / (s2 - x2) +
3 * A1 * x2 / (2 * (s2 - x2)**2)
)
def jvpnv_scipy(sigma, x):
return jvp_scipy(sigma, x) * nv_scipy(sigma, x)
@broadcastize(2)
def jvpnv(sigma, x):
"""Product of the first derivative of the Bessel function of the first kind
and the (not a derivative of) the Bessel function of the second kind.
Heyvaerts presents an expansion that makes these computations more
tractable at extreme values, where J_v is very small and N_v is very big.
"""
r = np.empty_like(sigma)
# Places where we can't use the approximation.
w = (sigma < DEBYE_SIGMA_CUT) | (np.abs(np.cbrt(sigma) / (sigma - x)) > DEBYE_REL_TOL)
r[w] = jvp(sigma[w], x[w]) * nv(sigma[w], x[w])
# Places where we can.
w = ~w
r[w] = jvpnv_heyvaerts_debye(sigma[w], x[w])
return r
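# A small consistency sketch (added illustration, not part of the original toolkit):
def _check_jvpnv_consistency():
    """Illustrative sketch: below the Debye cuts, jvpnv() should reduce to the
    plain product of jvp() and nv()."""
    sigma = np.array([5.])
    x = np.array([3.])
    assert np.allclose(jvpnv(sigma, x), jvp(sigma, x) * nv(sigma, x))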
def K23L13(x):
"""K_{2/3}(x) * L_{1/3}(x)
Evaluating the sin denominators, K_{2/3}(x) = pi/sqrt(3)*[I_{-2/3}(x) - I_{2/3}(x)],
and analogously for L.
This appproximation is only supposed to kick in when x <~ 1., but I have
cases where I have x ~ 15. I think what's happening is that the NR/QR
structure of the problem is weird (sigma_QR_min < sigma_0) and Heyvaert's
assumptions about the problem geometry aren't so valid.
"""
tt = 2. / 3
ot = 1. / 3
if x < 10:
K = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) - iv_scipy(tt, x))
else:
K = kv_scipy(tt, x)
L = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) + iv_scipy(ot, x))
return K * L
def K13L13(x):
ot = 1. / 3
K = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) - iv_scipy(ot, x))
L = np.pi / np.sqrt(3) * (iv_scipy(-ot, x) + iv_scipy(ot, x))
return K * L
def K23L23(x):
tt = 2. / 3
K = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) - iv_scipy(tt, x))
L = np.pi / np.sqrt(3) * (iv_scipy(-tt, x) + iv_scipy(tt, x))
return K * L
def evaluate_generic(sigma_max, s, theta, func, nsigma=64, npomega=64, **kwargs):
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
if sigma_max < 0:
sigma_max = np.abs(sigma_max) * sigma0
else:
assert sigma_max > sigma0
sigmas = np.linspace(sigma_max, sigma0, nsigma) # backwards so default view looks more intuitive
pomega_max = np.sqrt(sigma_max**2 - sigma0**2) * 0.999999 # hack to avoid roundoffs => negative sqrts
pomegas = np.linspace(-pomega_max, pomega_max, npomega)
plane = np.ma.zeros((nsigma, npomega))
    plane.mask = np.ma.ones((nsigma, npomega), dtype=bool)
for i in range(nsigma):
sigma = sigmas[i]
this_pomega_max = np.sqrt(sigma**2 - sigma0**2)
j0, j1 = np.searchsorted(pomegas, [-this_pomega_max, this_pomega_max])
these_pomegas = pomegas[j0:j1]
x = np.sqrt(sigma**2 - these_pomegas**2 - sigma0**2)
gamma = (sigma - these_pomegas * cos_theta) / (s * sin_theta**2)
mu = (sigma * cos_theta - these_pomegas) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
v = func(
s = s,
sigma = sigma,
pomega = these_pomegas,
gamma = gamma,
x = x,
mu = mu,
sin_theta = sin_theta,
cos_theta = cos_theta,
sigma0 = sigma0,
**kwargs
)
plane[i,j0:j1] = v
plane.mask[i,j0:j1] = False
return sigmas, pomegas, plane
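# A minimal usage sketch (added illustration, not part of the original toolkit):
def _demo_evaluate_generic():
    """Illustrative sketch: sample the Lorentz factor gamma over the
    physically allowed (sigma, pomega) domain, out to three times sigma0. The
    returned masked plane is convenient for pcolormesh-style plotting."""
    get_gamma = lambda **kwargs: kwargs['gamma']
    sigmas, pomegas, plane = evaluate_generic(-3., DEFAULT_S, DEFAULT_THETA, get_gamma,
                                              nsigma=32, npomega=32)
    return sigmas, pomegas, plane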
def fake_integrate_generic(sigma_max, s, theta, func, **kwargs):
"Not demonstrated to actually work!!!"
def volume_unit(**kwargs):
return kwargs['gamma'] * func(**kwargs)
sigmas, pomegas, plane = evaluate_generic(sigma_max, s, theta, volume_unit, **kwargs)
dsigma = sigmas[0] - sigmas[1] # recall that sigmas are backwards
dpomega = pomegas[1] - pomegas[0]
return FOUR_PI_M3_C3 * plane.filled(0.).sum() * dsigma * dpomega / (2 * s**2 * np.sin(theta)**2)
def real_integrate_generic(sigma_max, s, theta, func, edit_bounds=None, limit=5000, **kwargs):
"""This integrates over sigma and pomega using the physical bounds defined in
Heyvaerts, but without any prefactors or Jacobian terms. As such it returns
the full output from `scipy.integrate.quad`.
"""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
if sigma_max < 0:
sigma_max = np.abs(sigma_max) * sigma0
else:
assert sigma_max > sigma0
inner_kwargs = dict(
s = s,
sin_theta = sin_theta,
cos_theta = cos_theta,
sigma0 = sigma0
)
def inner_integrand(pomega, sigma):
inner_kwargs['pomega'] = pomega
inner_kwargs['x'] = np.sqrt(sigma**2 - pomega**2 - sigma0**2)
gamma = inner_kwargs['gamma'] = (sigma - pomega * cos_theta) / (s * sin_theta**2)
inner_kwargs['mu'] = (sigma * cos_theta - pomega) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
return func(**inner_kwargs)
def outer_integrand(sigma):
inner_kwargs['sigma'] = sigma
pomega_max = np.sqrt(sigma**2 - sigma0**2)
if edit_bounds is None:
pomega_min = -pomega_max
else:
inner_kwargs['pomega_max'] = pomega_max
pomega_min, pomega_max = edit_bounds(**inner_kwargs)
r = quad(inner_integrand, pomega_min, pomega_max, args=(sigma,), limit=2048)[0]
#print('O', sigma, r)
return r
return quad(outer_integrand, sigma0, sigma_max, limit=limit, **kwargs)
def _sample_integral_inner_integrand(pomega, sigma, func, inner_kwargs, s, sigma0, sin_theta, cos_theta):
inner_kwargs['sigma'] = sigma
inner_kwargs['pomega'] = pomega
inner_kwargs['x'] = np.sqrt(sigma**2 - pomega**2 - sigma0**2)
gamma = inner_kwargs['gamma'] = (sigma - pomega * cos_theta) / (s * sin_theta**2)
inner_kwargs['mu'] = (sigma * cos_theta - pomega) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
r = func(**inner_kwargs)
#print('Z', pomega, sigma, r, inner_kwargs['x'], gamma)
return r
def sample_integral(sigma_max, s, theta, func, nsigma=20, log=False, edit_bounds=None, parallel=True):
"""Sample integrals along the pomega axis.
"""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
if sigma_max < 0:
sigma_max = np.abs(sigma_max) * sigma0
else:
assert sigma_max > sigma0
inner_kwargs = dict(
s = s,
sin_theta = sin_theta,
cos_theta = cos_theta,
sigma0 = sigma0
)
if log:
sigma = np.logspace(np.log10(sigma0), np.log10(sigma_max), nsigma)
else:
sigma = np.linspace(sigma0, sigma_max, nsigma)
    pomega_max = np.sqrt(sigma**2 - sigma0**2)
    # If we're using a distribution with cutoffs that are not easily expressed in sigma/pomega
    # space, it makes life a lot easier to manually edit the boundaries.
    if edit_bounds is None:
        pomega_min = -pomega_max
    else:
else:
edit_kwargs = dict(inner_kwargs)
edit_kwargs['pomega_max'] = pomega_max
edit_kwargs['sigma'] = sigma
pomega_min, pomega_max = edit_bounds(**edit_kwargs)
# The integrand function has to be standalone so that multiprocessing can
# do its (low-quality) magic.
vals, errors = parallel_quad(
_sample_integral_inner_integrand,
pomega_min, pomega_max,
(sigma,), # parallelized arguments
(func, inner_kwargs, s, sigma0, sin_theta, cos_theta), # repeated arguments
parallel = parallel,
limit = 1024,
)
return sigma, vals, errors
def physical_integrate_generic(sigma_max, s, theta, func, **kwargs):
"""This integrates over sigma and pomega with the Jacobian needed for the
result to work out to a correctly-weighted integral over all of momentum
space.
"""
def volume_factor(**kwargs):
return kwargs['gamma'] * func(**kwargs)
return FOUR_PI_M3_C3 * real_integrate_generic(sigma_max, s, theta, volume_factor, **kwargs)[0] \
/ (2 * s**2 * np.sin(theta)**2)
def evaluate_var(sigma_max, s, theta, name, **kwargs):
def get_var(**kwargs):
return kwargs[name]
return evaluate_generic(sigma_max, s, theta, get_var, **kwargs)
def evaluate_qe_f_weight(sigma_max, s, theta, **kwargs):
"""This is the term that appears in the `f` integrand in the quasi-exact
integral, equation 25, that does not depend on the distribution function.
We don't worry about the prefactor.
Large values of sigma can easily yield NaNs and infs from the Bessel
function evaluators.
This term appears to be odd in pomega.
"""
def get(pomega=None, x=None, sigma=None, **kwargs):
return pomega * (x * jvpnv(sigma, x) + 1. / np.pi)
return evaluate_generic(sigma_max, s, theta, get, **kwargs)
def evaluate_qe_h_weight(sigma_max, s, theta, **kwargs):
"""This is the term that multiplies the distribution function derivative in
the first term of the `h` integrand in the quasi-exact integral, equation
26, that does not depend on the distribution function. We don't worry
about the prefactor.
Large values of sigma can easily yield NaNs and infs from the Bessel
function evaluators.
This term appears to be independent of sigma and even in pomega.
"""
def get(pomega=None, x=None, sigma=None, **kwargs):
return x**2 * jvp(sigma, x) * nvp(sigma, x) - pomega**2 * jv(sigma, x) * nv(sigma, x)
return evaluate_generic(sigma_max, s, theta, get, **kwargs)
def evaluate_bqr(sigma_max, s, theta, **kwargs):
"""This is B_QR, equation 30, which indicates whether a given set of particle
parameters interact with the wave in the non-resonant (NR; B_QR > 0) or
quasi-resonant (QR; B_QR < 0) mode.
"""
k = 3**(2./3)
def get(pomega=None, sigma=None, sigma0=None, **kwargs):
return pomega**2 - k * sigma**(4./3) + sigma0**2
return evaluate_generic(sigma_max, s, theta, get, **kwargs)
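# A minimal usage sketch (added illustration, not part of the original toolkit):
def _demo_bqr_classification():
    """Illustrative sketch: estimate what fraction of the sampled
    (sigma, pomega) plane is quasi-resonant, i.e. has B_QR < 0."""
    _, _, bqr = evaluate_bqr(-3., DEFAULT_S, DEFAULT_THETA, nsigma=32, npomega=32)
    return float((bqr < 0).sum()) / bqr.count()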
def evaluate_qr(sigma_max, s, theta, func, nsigma=64, npomega=64, **kwargs):
"""Sample a 2D plane, but only inside the QR (quasi-resonant) region."""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
sigma_low = max(sigma0, sigma0**1.5 / np.sqrt(3))
three_two_thirds = 3**(2./3)
if sigma_max < 0:
sigma_max = np.abs(sigma_max) * sigma_low
else:
assert sigma_max > sigma_low
    sigmas = np.linspace(sigma_max, sigma_low, nsigma) # backwards so default view looks more intuitive; sigma_low avoids negative sqrt arguments below the QR boundary
pomega_max = np.sqrt(three_two_thirds * sigma_max**(4./3) - sigma0**2)
pomegas = np.linspace(-pomega_max, pomega_max, npomega)
plane = np.ma.zeros((nsigma, npomega))
    plane.mask = np.ma.ones((nsigma, npomega), dtype=bool)
for i in range(nsigma):
sigma = sigmas[i]
this_pomega_max = min(
np.sqrt(three_two_thirds * sigma**(4./3) - sigma0**2) * 0.999999, # hack as above
np.sqrt(sigma**2 - sigma0**2) * 0.999999, # full physical boundary
)
j0, j1 = np.searchsorted(pomegas, [-this_pomega_max, this_pomega_max])
these_pomegas = pomegas[j0:j1]
x = np.sqrt(sigma**2 - these_pomegas**2 - sigma0**2)
gamma = (sigma - these_pomegas * cos_theta) / (s * sin_theta**2)
mu = (sigma * cos_theta - these_pomegas) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
v = func(
s = s,
sigma = sigma,
pomega = these_pomegas,
gamma = gamma,
x = x,
mu = mu,
sin_theta = sin_theta,
cos_theta = cos_theta,
sigma0 = sigma0,
**kwargs
)
plane[i,j0:j1] = v
plane.mask[i,j0:j1] = False
return sigmas, pomegas, plane
# Full integral broken into NR and QR contributions
def nrqr_integrate_generic(s, theta, nr_func, qr_func, limit=4096, **kwargs):
"""Integrate over the physical domain, breaking the integration into the NR
    and QR domains for which different functions are used. No prefactors or
internal multipliers are used, so the two functions must include the
appropriate internal terms and have the same external scaling.
"""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
sigma0 = s * sin_theta
inner_kwargs = dict(
s = s,
sin_theta = sin_theta,
cos_theta = cos_theta,
sigma0 = sigma0
)
# Prepare the NR integral: pomega on the outside, sigma on the inside
def inner_integrand(sigma, pomega, func):
inner_kwargs['sigma'] = sigma
inner_kwargs['x'] = np.sqrt(sigma**2 - pomega**2 - sigma0**2)
gamma = inner_kwargs['gamma'] = (sigma - pomega * cos_theta) / (s * sin_theta**2)
inner_kwargs['mu'] = (sigma * cos_theta - pomega) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
r = func(**inner_kwargs)
#print('IP', inner_kwargs, sigma, pomega, r)
return r
def outer_integrand(pomega, func, **kwargs):
inner_kwargs['pomega'] = pomega
sigma_min = np.sqrt(pomega**2 + sigma0**2)
sigma_max = sigma_min**1.5 / 3**0.5
if sigma_max <= sigma_min:
return 0.
r = quad(inner_integrand, sigma_min, sigma_max, args=(pomega, func), limit=limit, **kwargs)[0]
#print('OP', pomega, r)
return r
integrate = lambda p1, p2, f: quad(
outer_integrand, p1, p2, args=(f,), limit=limit, **kwargs
)[0]
def derivative(pomega, func):
eps = np.abs(pomega) * 1e-6
v1 = outer_integrand(pomega, func, epsrel=1e-6)
v2 = outer_integrand(pomega + eps, func, epsrel=1e-6)
norm = np.abs([v1, v2]).max()
if norm == 0.:
return 0.
return (v2 - v1) / (norm * eps)
# Always start by integrating over the center of the NR region.
pomega_left = -3 * sigma0
pomega_right = 3 * sigma0
delta_left = delta_right = pomega_right
TOL = 1e-5
keep_going = True
DELTA_SCALE_FACTOR = 5
# This can be zero if we're in the "low-frequency" regime such that our
# initial pomegas just don't cover any NR area.
nr_val = quad(outer_integrand, pomega_left, pomega_right, args=(nr_func,), **kwargs)[0]
while keep_going:
if nr_val != 0.:
# I think this logic is OK regardless of the sign of rel_deriv,
# although the motivation is based on a view in which it's
# negative. We don't take the derivative on the first pass since
# it is often hard to compute at minimal values of sigma.
rel_deriv = derivative(pomega_right, nr_func)
if rel_deriv == 0. or abs(1. / (rel_deriv * delta_right)) > DELTA_SCALE_FACTOR:
delta_right *= DELTA_SCALE_FACTOR
##print('delta_right bumped to', delta_right)
contrib = integrate(pomega_right, pomega_right + delta_right, nr_func)
if nr_val == 0.:
#print('cr(0):', pomega_right, contrib, '--')
pass
else:
#print('cr:', np.abs(contrib / nr_val))
keep_going = np.abs(contrib / nr_val) > TOL
nr_val += contrib
pomega_right += delta_right
keep_going = True
while keep_going:
rel_deriv = derivative(pomega_left, nr_func)
if rel_deriv == 0. or abs(1. / (rel_deriv * delta_left)) > DELTA_SCALE_FACTOR:
delta_left *= DELTA_SCALE_FACTOR
##print('delta_left bumped to', delta_left)
contrib = integrate(pomega_left - delta_left, pomega_left, nr_func)
#print('cl:', np.abs(contrib / nr_val))
keep_going = np.abs(contrib / nr_val) > TOL
nr_val += contrib
pomega_left -= delta_left
# Now the QR contribution, changing the order in which we evaluate the integral.
# Sigma is now on the outside, pomega on the inside.
def inner_integrand(pomega, sigma, func):
inner_kwargs['pomega'] = pomega
inner_kwargs['x'] = np.sqrt(sigma**2 - pomega**2 - sigma0**2)
#print('ZZZ', sigma, pomega, sigma0, inner_kwargs['x'])
gamma = inner_kwargs['gamma'] = (sigma - pomega * cos_theta) / (s * sin_theta**2)
inner_kwargs['mu'] = (sigma * cos_theta - pomega) / (s * sin_theta**2 * np.sqrt(gamma**2 - 1))
r = func(**inner_kwargs)
#print('%.18e %.18e %.18e' % (sigma, pomega, r))
return r
three_two_thirds = 3**(2./3)
def outer_integrand(sigma, func, **kwargs):
inner_kwargs['sigma'] = sigma
pomega_max = min(
np.sqrt(three_two_thirds * sigma**(4./3) - sigma0**2), # QR boundary
np.sqrt(sigma**2 - sigma0**2), # full physical boundary
)
pomega_min = -pomega_max
#print()
#print('OS BEGIN:', sigma)
r = quad(inner_integrand, pomega_min, pomega_max, args=(sigma, func), limit=limit, **kwargs)[0]
#print('OS END:', sigma, '=>', r)
#print()
return r
integrate = lambda s1, s2, f: quad(
outer_integrand, s1, s2, args=(f,), limit=limit, **kwargs
)[0]
def derivative(sigma, func):
eps = np.abs(sigma) * 1e-6
v1 = outer_integrand(sigma, func, epsrel=1e-6)
v2 = outer_integrand(sigma + eps, func, epsrel=1e-6)
norm = np.abs([v1, v2]).max()
if norm == 0.:
return 0.
return (v2 - v1) / (norm * eps)
sigma_low = max(sigma0, sigma0**1.5 / np.sqrt(3))
delta_sigma = 1 * sigma0
qr_val = 0.
keep_going = True
DELTA_SCALE_FACTOR = 5
while keep_going:
if qr_val != 0.:
rel_deriv = derivative(sigma_low, qr_func)
if rel_deriv == 0. or abs(1. / (rel_deriv * delta_sigma)) > DELTA_SCALE_FACTOR:
delta_sigma *= DELTA_SCALE_FACTOR
##print('delta_sigma bumped to', delta_sigma)
contrib = integrate(sigma_low, sigma_low + delta_sigma, qr_func)
if qr_val == 0.:
#print('cs(0):', sigma_low, contrib, nr_val)
pass
else:
#print('cs:', sigma_low, contrib, np.abs(contrib / qr_val))
keep_going = np.abs(contrib / qr_val) > TOL
qr_val += contrib
sigma_low += delta_sigma
#print('final NR:', nr_val, ' QR:', qr_val)
return nr_val + qr_val
# TODO: isotropy for now
class Distribution(object):
def dfdsigma(self, **kwargs):
dfdg = self.dfdg(**kwargs)
g_term = dfdg / (kwargs['s'] * kwargs['sin_theta']**2)
dfdcx = self.dfdcx(**kwargs)
if dfdcx == 0.:
mu_term = 0.
else:
# I'm sure this could be simplified but this works so let's just
# run with it for now. I just banged this out in sympy and checked
# numerically.
sigma = kwargs['sigma']
pomega = kwargs['pomega']
sinth = kwargs['sin_theta']
costh = kwargs['cos_theta']
s = kwargs['s']
Q = sigma - pomega * costh
R = pomega - sigma * costh
T = s * sinth**2
U = Q**2 - T**2
dcxdsigma = (Q * U * costh + U * R + R * T**2) / (U**1.5 * Q)
mu_term = dcxdsigma * dfdcx
return g_term + mu_term
def check_normalization(self, sigma_max=np.inf, s=DEFAULT_S, theta=DEFAULT_THETA, **kwargs):
"""Should return 1 if this distribution is normalized correctly."""
return physical_integrate_generic(sigma_max, s, theta, self.just_f,
edit_bounds=self.edit_pomega_bounds, **kwargs)
def edit_pomega_bounds(self, pomega_max=None, **kwargs):
return (-pomega_max, pomega_max)
# Single quasi-exact representation of the integrand.
def f_qe_element(self, **kwargs):
"""Evaluate the integrand of the quasi-exact expression for `f` using this
distribution function.
Heyvaerts equation 25. We include the prefactors that are not part of
the "standard Heyvaerts integral"; namely, 2 pi^2 / c; we fix s_q =
-1. This is to ease comparability with the other expressions, which
have different prefactor choices in the various integrals that are
written in the paper.
Large values of sigma can easily yield NaNs and infs from the Bessel
function evaluators.
This function has a severe change in behavior across the NR/QR
boundary; numerical integrals are, I believe, better performed using
the more specialized _nr_ and _qr_ functions.
"""
po = kwargs['pomega']
sg = kwargs['sigma']
x = kwargs['x']
pf = 2 * np.pi**2 / cgs.c
return pf * self.dfdsigma(**kwargs) * po * (x * jvpnv(sg, x) + 1. / np.pi)
def f_qe(self, sigma_max=np.inf, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1., omega=1.,
epsrel=1e-3, **kwargs):
"""Calculate `f` in the quasi-exact regime.
The returned value is multiplied by `omega_p**2 / omega`. These two parameters
are dimensional but do not figure into the calculation otherwise.
"""
integral = real_integrate_generic(sigma_max, s, theta, self.f_qe_element, epsrel=epsrel,
edit_bounds=self.edit_pomega_bounds, **kwargs)[0]
std_prefactor = M3_C3 * omega_p**2 / (s**2 * np.sin(theta)**2 * omega)
return std_prefactor * integral
def h_qe_element(self, **kwargs):
"""Evaluate the integrand of the quasi-exact expression for `h` using this
distribution function.
SKIPPING d(F_0)/d(pomega) TERM! Hence the IKWID.
Heyvaerts equation 26, wrapping in prefactors as described in `f_qe_element`.
"""
po = kwargs['pomega']
sg = kwargs['sigma']
x = kwargs['x']
dfds = self.dfdsigma(**kwargs)
assert IKWID, 'Do not use this; no d(F)/d(pomega) term.'
pf = np.pi**2 / cgs.c
return pf * dfds * (x**2 * jvp(sg, x) * nvp(sg, x) - po**2 * jv(sg, x) * nv(sg, x) - sg / np.pi)
def h_qe(self, sigma_max=np.inf, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1., omega=1.,
epsrel=1e-3, **kwargs):
"""Calculate `h` in the quasi-exact regime.
The returned value is multiplied by `omega_p**2 / omega`. These two parameters
are dimensional but do not figure into the calculation otherwise.
"""
integral = real_integrate_generic(sigma_max, s, theta, self.h_qe_element, epsrel=epsrel,
edit_bounds=self.edit_pomega_bounds, **kwargs)[0]
std_prefactor = M3_C3 * omega_p**2 / (s**2 * np.sin(theta)**2 * omega)
return std_prefactor * integral
# Split QR/NR approach
def f_nr_element(self, **kwargs):
"""Evaluate the integrand of the non-resonant expression for `f` using this
distribution function.
Equation 115, with prefactors folded in in the usual way. Note that there's a
1/pi inside the integral.
"""
dfds = self.dfdsigma(**kwargs)
po = kwargs['pomega']
sg = kwargs['sigma']
x = kwargs['x']
s2 = sg**2
x2 = x**2
s2mx2 = s2 - x2
A1 = 0.125 - 5 * s2 / (24 * s2mx2)
A2 = 3./128 - 77 * s2 / (576 * s2mx2) + 385 * s2**2 / (3456 * s2mx2**2)
xA1p = -5 * s2 * x2 / (12 * s2mx2**2)
z = x2 / (2 * s2mx2**1.5) + (6 * A2 + xA1p - A1**2) / s2mx2 + 3 * A1 * x2 / (2 * s2mx2**2)
pf = -2 * np.pi / cgs.c
return pf * z * po * dfds
def h_nr_element(self, **kwargs):
"""Evaluate the integrand of the non-resonant expression for `h` using this
distribution function.
Equation 119, with usual prefactorization.
"""
dfds = self.dfdsigma(**kwargs)
po = kwargs['pomega']
sg = kwargs['sigma']
x = kwargs['x']
s2 = sg**2
x2 = x**2
s2mx2 = s2 - x2
A1 = 0.125 - 5 * s2 / (24 * s2mx2)
A2 = 3./128 - 77 * s2 / (576 * s2mx2) + 385 * s2**2 / (3456 * s2mx2**2)
xA1p = -5 * s2 * x2 / (12 * s2mx2**2)
t1 = (6 * A2 - A1**2 + xA1p) / s2mx2**0.5 + A1 * x2 / s2mx2**1.5 - x2**2 / (8 * s2mx2**2.5)
t2 = (6 * A2 - A1**2) / s2mx2**1.5
u1 = 2 * t1 - kwargs['sigma0']**2 * t2
pf = np.pi / cgs.c
return pf * dfds * u1
def f_qr_element(self, **kwargs):
"""Evaluate the integrand of the quasi-resonant expression for `f` using this
distribution function.
Equation 99, moving the x inside the parentheses, and using the usual
prefactor treatment.
"""
dfds = self.dfdsigma(**kwargs)
po = kwargs['pomega']
sg = kwargs['sigma']
x = kwargs['x']
g = np.sqrt(8.) * (sg - x)**1.5 / (3 * np.sqrt(x))
z = np.sqrt(3.) * g * K23L13(g) - np.pi
pf = -2 / cgs.c
#print('Z %.18e %.18e %.18e %.18e %.18e %.18e' % (pf, po, dfds, z, g, K23L13(g)))
return pf * po * dfds * z
def h_qr_element(self, **kwargs):
"""Evaluate the integrand of the quasi-resonant expression for `h` using this
distribution function.
Equation 120, usual prefactor treatment.
"""
dfds = self.dfdsigma(**kwargs)
po2 = kwargs['pomega']**2
sg = kwargs['sigma']
x = kwargs['x']
s02 = kwargs['sigma0']**2
smxox = (sg - x) / x
g = np.sqrt(8.) * (sg - x)**1.5 / (3 * np.sqrt(x))
t1 = 4 * x**2 * smxox**2 / np.sqrt(3) * K23L23(g)
t2 = 2 * po2 * smxox / np.sqrt(3) * K13L13(g)
t3 = -np.pi * (2 * po2 + s02) / np.sqrt(po2 + s02)
pf = 1. / cgs.c
return pf * (t1 + t2 + t3) * dfds
def f_nrqr(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1., omega=1.,
epsrel=1e-3, **kwargs):
"""Calculate `f` in the quasi-exact regime using the split NR/QR approximations.
The returned value is multiplied by `omega_p**2 / omega`. These two parameters
are dimensional but do not figure into the calculation otherwise.
"""
integral = nrqr_integrate_generic(
s, theta,
self.f_nr_element, self.f_qr_element,
epsrel = epsrel,
**kwargs
)
std_prefactor = M3_C3 * omega_p**2 / (s**2 * np.sin(theta)**2 * omega)
return std_prefactor * integral
def h_nrqr(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1., omega=1.,
epsrel=1e-3, use_qe=False, **kwargs):
"""Calculate `h` in the quasi-exact regime using the split NR/QR approximations.
The returned value is multiplied by `omega_p**2 / omega`. These two parameters
are dimensional but do not figure into the calculation otherwise.
"""
if use_qe:
nr_element = self.h_qr_element
qr_element = self.h_qe_element
else:
nr_element = self.h_nr_element
qr_element = self.h_qr_element
integral = nrqr_integrate_generic(
s, theta,
nr_element, qr_element,
epsrel=epsrel,
**kwargs
)
std_prefactor = M3_C3 * omega_p**2 / (s**2 * np.sin(theta)**2 * omega)
return std_prefactor * integral
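# Illustrative usage (a sketch, not part of the original module): any concrete
# subclass that provides just_f/dfdg/dfdcx, e.g. PowerLawDistribution below,
# exposes the split NR/QR Faraday coefficient integrals directly:
#
#   d = PowerLawDistribution(n=2.5)
#   f_val = d.f_nrqr(s=1e4, theta=np.pi / 4, omega_p=1., omega=1.)
#   h_val = d.h_nrqr(s=1e4, theta=np.pi / 4, omega_p=1., omega=1.)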
class IsotropicDistribution(Distribution):
"""For isotropic distributions, there are simpler integrals that we can
evaluate to help us check that we're getting accurate answers.
"""
def isotropic_gamma_max(self, fraction=0.999, gamma0=5.):
"""Find the Lorentz factor gamma_max such that *fraction* of all electrons
have Lorentz factors of gamma_max or smaller.
"""
remainder = 1. - fraction
def integral_diff(gamma):
return FOUR_PI_M3_C3 * quad(
lambda g: g * np.sqrt(g**2 - 1) * self.just_f(gamma=g),
gamma, np.inf
)[0] - remainder
from scipy.optimize import brentq
return brentq(integral_diff, 1., 1e10)
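    # Example (a sketch; the index value is hypothetical): for a fairly steep
    # power law,
    #
    #   PowerLawDistribution(n=3.5).isotropic_gamma_max(fraction=0.999)
    #
    # returns the Lorentz factor below which 99.9% of the electrons lie.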
def isotropic_hf_s_min(self, theta=DEFAULT_THETA, fraction=0.999):
"""Find the minimal harmonic number *s_min* such that calculations performed
        with `s > s_min` are in Heyvaerts' HF (high-frequency) regime for
essentially all particles. This value depends on the viewing angle
*theta*.
        We determine this number by finding the value *gamma_max* such that
        the substantial majority of all particles have Lorentz factors less
        than *gamma_max*.
See Heyvaerts equation 27 and surrounding discussion.
Note that we recalculate *gamma_max* every time we are called; the
value could be cached.
"""
gamma_max = self.isotropic_gamma_max(fraction=fraction)
return 3 * gamma_max**2 * np.sin(theta)
def isotropic_f_hf(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1.,
omega=1., **kwargs):
"""Calculate "f" for an isotropic distribution function, assuming that we can
        invoke Heyvaerts' HF (high-frequency) limit for all particles. This
assumption holds if *s* is larger than `self.isotropic_hf_s_min()`.
This function does not check that this condition holds, though.
"""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
def integrand(gamma):
ell = np.log(np.sqrt(gamma**2 - 1) + gamma)
F_iso_HF = -4 * np.pi * s * cos_theta * (gamma * ell - np.sqrt(gamma**2 - 1))
return self.dfdg(gamma=gamma) * F_iso_HF
integral = quad(integrand, 1., np.inf)[0]
return omega_p**2 * M3_C3 * integral / (cgs.c * s**2 * omega)
def isotropic_h_hf(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1.,
omega=1., **kwargs):
"""Calculate "h" for an isotropic distribution function, assuming that we can
        invoke Heyvaerts' HF (high-frequency) limit for all particles. This
assumption holds if *s* is larger than `self.isotropic_hf_s_min()`.
This function does not check that this condition holds, though.
"""
sin_theta = np.sin(theta)
def integrand(gamma):
ell = np.log(np.sqrt(gamma**2 - 1) + gamma)
H_iso_HF = -0.5 * np.pi * sin_theta**2 * (gamma * np.sqrt(gamma**2 - 1) * (2 * gamma**2 - 3) - ell)
return self.dfdg(gamma=gamma) * H_iso_HF
integral = quad(integrand, 1., np.inf)[0]
return omega_p**2 * M3_C3 * integral / (cgs.c * s**2 * omega)
def isotropic_f_lf(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1.,
omega=1., **kwargs):
"""Calculate "f" for an isotropic distribution function, assuming that we can
        invoke Heyvaerts' LF (low-frequency) limit for all particles.
TODO: unclear just when that assumption holds.
"""
sin_theta = np.sin(theta)
cos_theta = np.cos(theta)
prefactor = -np.pi * s * cos_theta
def integrand(gamma):
F_iso_LF = prefactor * gamma * (4. * np.log(gamma * s / sin_theta) / 3 - 1.26072439)
return self.dfdg(gamma=gamma) * F_iso_LF
integral = quad(integrand, 1., np.inf)[0]
return omega_p**2 * M3_C3 * integral / (cgs.c * s**2 * omega)
def isotropic_h_lf(self, s=DEFAULT_S, theta=DEFAULT_THETA, omega_p=1.,
omega=1., **kwargs):
"""Calculate "h" for an isotropic distribution function, assuming that we can
        invoke Heyvaerts' LF (low-frequency) limit for all particles.
TODO: unclear just when that assumption holds.
To reproduce the right panel of Heyvaerts Figure 2, use the thermal
Juettner distribution, s = 15, theta = pi/4, omega_p = omega = 1, and
multiply the result by (-c * s**2). If I *don't* multiply by the
constants, for T = 724, I think h ~= -5.2e-18; for T = 43, h ~=
-5.8e-16.
"""
sin_theta = np.sin(theta)
prefactor = np.pi / 8 * (4 - 3**(-4./3))
def integrand(gamma):
H_iso_LF = prefactor * (s**2 * sin_theta)**(2./3) * gamma**(4./3)
return self.dfdg(gamma=gamma) * H_iso_LF
integral = quad(integrand, 1., np.inf)[0]
return omega_p**2 * M3_C3 * integral / (cgs.c * s**2 * omega)
class PowerLawDistribution(IsotropicDistribution):
"""To ease the integration at this exploratory stage, we do not implement a
minimum gamma cutoff.
"""
def __init__(self, n):
self.neg_n = -n
self.norm = 1. / (FOUR_PI_M3_C3 * quad(lambda g: g**(1 + self.neg_n) * np.sqrt(g**2 - 1),
1., np.inf, epsrel=1e-5, limit=1000)[0])
assert self.norm > 0
def just_f(self, gamma=None, **kwargs):
return self.norm * gamma**self.neg_n
def dfdg(self, gamma=None, **kwargs):
return self.norm * self.neg_n * gamma**(self.neg_n - 1)
def dfdcx(self, **kwargs):
"d(f)/d(cos xi), where xi is the pitch angle."
return 0.
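# Quick sanity check (a sketch; the tolerance is arbitrary): the phase-space
# normalization chosen in __init__ means check_normalization() should
# evaluate to ~1, e.g.
#
#   pl = PowerLawDistribution(n=2.5)
#   assert abs(pl.check_normalization() - 1.) < 1e-2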
class CutoffPowerLawDistribution(IsotropicDistribution):
def __init__(self, gmin, n):
self.neg_n = -n
self.gmin = gmin
self.norm = 1. / (FOUR_PI_M3_C3 * quad(lambda g: g**(1 + self.neg_n) * np.sqrt(g**2 - 1),
gmin, np.inf, epsrel=1e-5, limit=1000)[0])
assert self.norm > 0
def edit_pomega_bounds(self, s=None, sin_theta=None, cos_theta=None, sigma=None, pomega_max=None, **kwargs):
pomcut = (sigma - self.gmin * s * sin_theta**2) / cos_theta
return (-pomega_max, np.minimum(pomega_max, pomcut))
def just_f(self, gamma=None, **kwargs):
# Sigh.
if isinstance(gamma, np.ndarray):
f = self.norm * gamma**self.neg_n
f[gamma < self.gmin] = 0.
return f
else:
if gamma < self.gmin:
return 0.
return self.norm * gamma**self.neg_n
def dfdg(self, gamma=None, **kwargs):
if isinstance(gamma, np.ndarray):
# XXX just ignoring the discontinuity at gamma = gmin!!!
f = self.norm * self.neg_n * gamma**(self.neg_n - 1)
f[gamma < self.gmin] = 0.
return f
else:
if gamma < self.gmin:
return 0.
return self.norm * self.neg_n * gamma**(self.neg_n - 1)
def dfdcx(self, **kwargs):
"d(f)/d(cos xi), where xi is the pitch angle."
return 0.
class CutoffGammaSpacePowerLawDistribution(IsotropicDistribution):
"""This is the power-law distribution used by Huang & Shcherbakov 2011: the
number density is power-law distributed in gamma, not in momentum space.
That works out to mean that the `gamma sqrt(gamma^2 - 1)` term is divided
out of what we call "f".
"""
def __init__(self, gmin, n):
self.neg_n = -n
self.gmin = gmin
self.norm = (n - 1) / (FOUR_PI_M3_C3 * gmin**(1 - n))
def just_f(self, gamma=None, **kwargs):
# Sigh.
if isinstance(gamma, np.ndarray):
f = self.norm * gamma**(self.neg_n - 1) / np.sqrt(gamma**2 - 1)
f[gamma < self.gmin] = 0.
return f
else:
if gamma < self.gmin:
return 0.
return self.norm * gamma**(self.neg_n - 1) / np.sqrt(gamma**2 - 1)
def dfdg(self, gamma=None, **kwargs):
array = isinstance(gamma, np.ndarray)
if not array and gamma < self.gmin:
return 0.
# XXX just ignoring the discontinuity at gamma = gmin!!!
        t1 = (-self.neg_n + 1) / gamma**2 + 1 / (gamma**2 - 1)
        f = -self.norm * gamma**self.neg_n / np.sqrt(gamma**2 - 1) * t1
        if array:
            f[gamma < self.gmin] = 0.
        return f
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 20:55:53 2018
Image dataset loader for a .txt file with a sample per line in the format
'path of image start_frame verb_id noun_id'
@author: Γιώργος
"""
import os
import pickle
import cv2
import numpy as np
from scipy.spatial.distance import pdist, squareform
from torch.utils.data import Dataset as torchDataset
from utils.video_sampler import RandomSampling, SequentialSampling, MiddleSampling, DoubleFullSampling, FullSampling
def get_class_weights(list_file, num_classes, use_mapping):
samples_list = parse_samples_list(list_file, DataLine)
counts = np.zeros(num_classes)
mapping = None
if use_mapping:
mapping = make_class_mapping(samples_list)
for s in samples_list:
counts[mapping[s.label_verb]] += 1
else:
for s in samples_list:
counts[s.label_verb] += 1
weights = 1 / counts
weights = weights / np.sum(weights)
return weights.astype(np.float32)
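# Example usage (a sketch; the split file name is hypothetical):
#
#   weights = get_class_weights('splits/train_verbs.txt', num_classes=120, use_mapping=False)
#   # inverse-frequency weights, e.g. for torch.nn.CrossEntropyLoss(weight=...)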
def make_class_mapping(samples_list):
classes = []
for sample in samples_list:
if sample.label_verb not in classes:
classes.append(sample.label_verb)
classes = np.sort(classes)
mapping_dict = {}
for i, c in enumerate(classes):
mapping_dict[c] = i
return mapping_dict
def make_class_mapping_generic(samples_list, attribute):
classes = []
for sample in samples_list:
label = getattr(sample, attribute)
if label not in classes:
classes.append(label)
classes = np.sort(classes)
mapping_dict = {}
for i, c in enumerate(classes):
mapping_dict[c] = i
return mapping_dict
def load_pickle(tracks_path):
with open(tracks_path, 'rb') as f:
tracks = pickle.load(f)
return tracks
def substitute_prefix(tracks_path, secondary_prefix):
obj_path = secondary_prefix
for p in tracks_path.split('\\')[1:]:
obj_path = os.path.join(obj_path, p)
return obj_path
def load_two_pickle(tracks_path, secondary_prefix):
obj_path = substitute_prefix(tracks_path, secondary_prefix)
return load_pickle(tracks_path), load_pickle(obj_path)
def load_point_samples(samples_list, bpv_prefix=None):
if bpv_prefix:
data_arr = [load_two_pickle(samples_list[index].data_path, bpv_prefix) for index in range(len(samples_list))]
else:
data_arr = [load_pickle(samples_list[index].data_path) for index in range(len(samples_list))]
return data_arr
# from PIL import Image
def load_images(data_path, frame_indices, image_tmpl):
images = []
# images = np.zeros((len(frame_indices), 640, 480, 3))
for f_ind in frame_indices:
im_name = os.path.join(data_path, image_tmpl.format(f_ind))
# next_image = np.array(Image.open(im_name).convert('RGB'))
next_image = cv2.imread(im_name, cv2.IMREAD_COLOR)
next_image = cv2.cvtColor(next_image, cv2.COLOR_BGR2RGB)
images.append(next_image)
# images[i] = next_image
return images
def prepare_sampler(sampler_type, clip_length, frame_interval):
if sampler_type == "train":
train_sampler = RandomSampling(num=clip_length,
interval=frame_interval,
speed=[0.5, 1.5], seed=None)
out_sampler = train_sampler
else:
val_sampler = SequentialSampling(num=clip_length,
interval=frame_interval,
fix_cursor=True,
shuffle=True, seed=None)
out_sampler = val_sampler
return out_sampler
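# Example usage (a sketch; clip length and interval are hypothetical):
#
#   sampler = prepare_sampler('train', clip_length=16, frame_interval=2)
#   idxs = sampler.sampling(range_max=120, v_id=0, start_frame=0)  # 16 frame indices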
def object_list_to_bpv(detections, num_noun_classes, max_seq_length):
sampled_detections = np.array(detections)
if max_seq_length != 0:
sampled_detections = sampled_detections[
np.linspace(0, len(detections), max_seq_length, endpoint=False, dtype=int)].tolist()
seq_length = max_seq_length
else:
seq_length = len(detections)
bpv = np.zeros((seq_length, num_noun_classes), dtype=np.float32)
for i, dets in enumerate(sampled_detections):
for obj in dets:
bpv[i, obj] = 1
return bpv
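# Worked mini example (illustrative): two frames of detections over 4 noun
# classes, with no temporal resampling (max_seq_length=0):
#
#   object_list_to_bpv([[0, 2], [1, 3]], num_noun_classes=4, max_seq_length=0)
#   # -> [[1, 0, 1, 0],
#   #     [0, 1, 0, 1]]   one binary presence vector per frame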
def load_left_right_tracks(hand_tracks, max_seq_length):
left_track = np.array(hand_tracks['left'], dtype=np.float32)
right_track = np.array(hand_tracks['right'], dtype=np.float32)
if max_seq_length != 0:
left_track = left_track[np.linspace(0, len(left_track), max_seq_length, endpoint=False, dtype=int)]
right_track = right_track[np.linspace(0, len(right_track), max_seq_length, endpoint=False, dtype=int)]
return left_track, right_track
def calc_distance_differences(track):
x2 = track[:, 0]
x1 = np.roll(x2, 1)
x1[0] = x1[1]
y2 = track[:, 1]
y1 = np.roll(y2, 1)
y1[0] = y1[1]
xdifs = x2 - x1
ydifs = y2 - y1
return np.concatenate((xdifs[:, np.newaxis], ydifs[:, np.newaxis]), -1)
def calc_angles(track):
x2 = track[:, 0]
x1 = np.roll(x2, 1)
x1[0] = x1[1]
y2 = track[:, 1]
y1 = np.roll(y2, 1)
y1[0] = y1[1]
angles = np.arctan2(y2 * x1 - y1 * x2, x2 * x1 + y2 * y1, dtype=np.float32)
return angles
def calc_polar_distance_from_prev(track):
return np.concatenate((np.array([0]),
np.diagonal(squareform(pdist(track)), offset=-1)))
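# Example (illustrative): per-frame motion features for a (T, 2) track,
#
#   track = np.array([[0., 0.], [3., 4.], [6., 8.]], dtype=np.float32)
#   calc_distance_differences(track)      # per-frame (dx, dy); first row is zero
#   calc_angles(track)                    # signed angle between successive position vectors
#   calc_polar_distance_from_prev(track)  # Euclidean step lengths: [0, 5, 5]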
class DataLine(object):
def __init__(self, row):
self.data = row
@property
def data_path(self):
return self.data[0]
@property
    def num_frames(self):  # in the old format this field is the start_frame
return int(self.data[1])
@property
def label_verb(self):
return int(self.data[2])
@property
def label_noun(self):
return int(self.data[3])
@property
def uid(self):
return int(self.data[4] if len(self.data) > 4 else -1)
@property
def start_frame(self):
return int(self.data[5] if len(self.data) > 5 else -1)
@property
def label_action(self):
return int(self.data[6] if len(self.data) > 6 else -1)
class GTEADataLine(object):
def __init__(self, row):
self.data = row
self.data_len = len(row)
def get_video_path(self, prefix): # only used for FromVideoDatasetLoader and is deprecated
return os.path.join(prefix, self.id_recipe, self.data_path + '.mp4')
@property
def data_path(self):
return self.data[0]
@property
def frames_path(self):
path_parts = os.path.normpath(self.data[0]).split(os.sep)
session_parts = path_parts[1].split('-')
session = session_parts[0] + '-' + session_parts[1] + '-' + session_parts[2]
return os.path.join(path_parts[0], session, path_parts[1])
@property
def instance_name(self):
return os.path.normpath(self.data[0]).split(os.sep)[1]
@property
def id_recipe(self):
name_parts = self.data[0].split('-')
id_recipe = name_parts[0] + '-' + name_parts[1] + '-' + name_parts[2]
return id_recipe
@property
def label_action(self): # to zero based labels
return int(self.data[1]) - 1
@property
def label_verb(self):
return int(self.data[2]) - 1
@property
def label_noun(self):
return int(self.data[3]) - 1
@property
def extra_nouns(self):
extra_nouns = list()
if self.data_len > 4:
for noun in self.data[4:]:
extra_nouns.append(int(noun) - 1)
return extra_nouns
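# Example (illustrative row; path and ids are hypothetical): the line
# "S1-Coffee-C1/S1-Coffee-C1-F1 12 3 7 9" parses to a GTEADataLine with
# label_action=11, label_verb=2, label_noun=6 (1-based labels converted to
# 0-based) and extra_nouns=[8].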
def parse_samples_list(list_file, datatype):
return [datatype(x.strip().split(' ')) for x in open(list_file)]
class ImageDatasetLoader(torchDataset):
def __init__(self, list_file, num_classes=120,
batch_transform=None, channels='RGB', validation=False):
self.samples_list = parse_samples_list(list_file, DataLine)
if num_classes != 120:
self.mapping = make_class_mapping(self.samples_list)
else:
self.mapping = None
self.transform = batch_transform
self.channels = channels
self.validation = validation
self.image_read_type = cv2.IMREAD_COLOR if channels == 'RGB' else cv2.IMREAD_GRAYSCALE
def __len__(self):
return len(self.samples_list)
def __getitem__(self, index):
img = cv2.imread(self.samples_list[index].data_path, self.image_read_type).astype(np.float32)
if self.channels == 'RGB':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.transform is not None:
img = self.transform(img)
if self.mapping:
class_id = self.mapping[self.samples_list[index].label_verb]
else:
class_id = self.samples_list[index].label_verb
if not self.validation:
return img, class_id
else:
name_parts = self.samples_list[index].data_path.split("\\")
return img, class_id, name_parts[-2] + "\\" + name_parts[-1]
class Video(object):
# adapted from https://github.com/cypw/PyTorch-MFNet/blob/master/data/video_iterator.py
"""basic Video class"""
def __init__(self, vid_path):
self.open(vid_path)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.__del__()
def reset(self):
self.close()
self.vid_path = None
self.frame_count = -1
self.faulty_frame = None
return self
def open(self, vid_path):
assert os.path.exists(vid_path), "VideoIter:: cannot locate: `{}'".format(vid_path)
# close previous video & reset variables
self.reset()
# try to open video
cap = cv2.VideoCapture(vid_path)
if cap.isOpened():
self.cap = cap
self.vid_path = vid_path
else:
raise IOError("VideoIter:: failed to open video: `{}'".format(vid_path))
return self
def count_frames(self, check_validity=False):
offset = 0
if self.vid_path.endswith('.flv'):
offset = -1
unverified_frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + offset
if check_validity:
verified_frame_count = 0
for i in range(unverified_frame_count):
self.cap.set(cv2.CAP_PROP_POS_FRAMES, i)
if not self.cap.grab():
print("VideoIter:: >> frame (start from 0) {} corrupted in {}".format(i, self.vid_path))
break
verified_frame_count = i + 1
self.frame_count = verified_frame_count
else:
self.frame_count = unverified_frame_count
assert self.frame_count > 0, "VideoIter:: Video: `{}' has no frames".format(self.vid_path)
return self.frame_count
def extract_frames(self, idxs, force_color=True):
frames = self.extract_frames_fast(idxs, force_color)
if frames is None:
# try slow method:
frames = self.extract_frames_slow(idxs, force_color)
return frames
def extract_frames_fast(self, idxs, force_color=True):
assert self.cap is not None, "No opened video."
if len(idxs) < 1:
return []
frames = []
pre_idx = max(idxs)
for idx in idxs:
assert (self.frame_count < 0) or (idx < self.frame_count), \
"idxs: {} > total valid frames({})".format(idxs, self.frame_count)
if pre_idx != (idx - 1):
self.cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
res, frame = self.cap.read() # in BGR/GRAY format
pre_idx = idx
if not res:
self.faulty_frame = idx
return None
if len(frame.shape) < 3:
if force_color:
# Convert Gray to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
else:
# Convert BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
return frames
def extract_frames_slow(self, idxs, force_color=True):
assert self.cap is not None, "No opened video."
if len(idxs) < 1:
return []
frames = [None] * len(idxs)
idx = min(idxs)
self.cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
while idx <= max(idxs):
res, frame = self.cap.read() # in BGR/GRAY format
if not res:
# end of the video
self.faulty_frame = idx
return None
if idx in idxs:
                # found a frame
if len(frame.shape) < 3:
if force_color:
# Convert Gray to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
else:
# Convert BGR to RGB
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pos = [k for k, i in enumerate(idxs) if i == idx]
for k in pos:
frames[k] = frame
idx += 1
return frames
def close(self):
if hasattr(self, 'cap') and self.cap is not None:
self.cap.release()
self.cap = None
return self
class VideoFromImagesDatasetLoader(torchDataset): # loads GTEA dataset from frames
OBJECTIVE_NAMES = ['label_action', 'label_verb', 'label_noun']
def __init__(self, sampler, split_file, line_type, num_classes, max_num_classes, img_tmpl='img_{:05d}.jpg',
batch_transform=None, extra_nouns=False, use_gaze=False, gaze_list_prefix=None, use_hands=False,
hand_list_prefix=None, validation=False, gaze_evaluation=False, vis_data=False):
self.sampler = sampler
self.video_list = parse_samples_list(split_file, GTEADataLine) # if line_type=='GTEA' else DataLine)
self.extra_nouns = extra_nouns
self.usable_objectives = list()
self.mappings = list()
        for i, (objective, objective_name) in enumerate(zip(num_classes, VideoFromImagesDatasetLoader.OBJECTIVE_NAMES)):
self.usable_objectives.append(objective > 0)
if objective != max_num_classes[i] and self.usable_objectives[-1]:
self.mappings.append(make_class_mapping_generic(self.video_list, objective_name))
else:
self.mappings.append(None)
assert any(obj is True for obj in self.usable_objectives)
self.transform = batch_transform
self.validation = validation
self.vis_data = vis_data
self.use_gaze = use_gaze
self.gaze_list_prefix = gaze_list_prefix
self.use_hands = use_hands
self.hand_list_prefix = hand_list_prefix
self.norm_val = [640., 480., 640., 480.]
self.image_tmpl = img_tmpl
self.gaze_evaluation = gaze_evaluation
def __len__(self):
return len(self.video_list)
def __getitem__(self, index):
path = self.video_list[index].frames_path
instance_name = self.video_list[index].instance_name
frame_count = len(os.listdir(path))
assert frame_count > 0
sampled_idxs = self.sampler.sampling(range_max=frame_count, v_id=index, start_frame=0)
sampled_frames = load_images(path, sampled_idxs, self.image_tmpl)
clip_input = np.concatenate(sampled_frames, axis=2)
or_h, or_w, _ = clip_input.shape
# gaze points is the final output, gaze data is the pickle data, gaze track is intermediate versions
gaze_points, gaze_data, gaze_track = None, None, None
if self.use_gaze:
gaze_track_path = os.path.join(self.gaze_list_prefix, instance_name + '.pkl')
gaze_data = load_pickle(gaze_track_path)
gaze_track = np.array([[value[0], value[1]] for key, value in gaze_data.items()], dtype=np.float32)
if 'DoubleFullSampling' not in self.sampler.__repr__():
gaze_track = gaze_track[sampled_idxs]
if 'DoubleFullSampling' not in self.sampler.__repr__() and not self.vis_data:
gaze_track = gaze_track[::2]
gaze_track *= self.norm_val[:2] # probably slower like this, but more robust following hand method
# hands points is the final output, hand tracks is pickle, left and right track are intermediate versions
hand_points, hand_tracks, left_track, right_track = None, None, None, None
if self.use_hands: # almost the same process as VideoAndPointDatasetLoader
hand_track_path = os.path.join(self.hand_list_prefix, instance_name + '.pkl')
hand_tracks = load_pickle(hand_track_path)
            left_track = np.array(hand_tracks['left'], dtype=np.float32)  # the last point is never used; it is an artifact of a tracker bug
right_track = np.array(hand_tracks['right'], dtype=np.float32)
left_track = left_track[sampled_idxs] # keep the points for the sampled frames
right_track = right_track[sampled_idxs]
if not self.vis_data:
left_track = left_track[::2] # keep 1 coordinate pair for every two frames because we supervise 8 outputs from the temporal dim of mfnet and not 16 as the inputs
right_track = right_track[::2]
# apply transforms on the video clip
if self.transform is not None:
clip_input = self.transform(clip_input)
_, _, max_h, max_w = clip_input.shape
if self.use_hands or self.use_gaze:
is_flipped = False
if 'RandomScale' in self.transform.transforms[
0].__repr__(): # means we are in training so get the transformations
sc_w, sc_h = self.transform.transforms[0].get_new_size()
tl_y, tl_x = self.transform.transforms[1].get_tl()
if 'RandomHorizontalFlip' in self.transform.transforms[2].__repr__():
is_flipped = self.transform.transforms[2].is_flipped()
elif 'Resize' in self.transform.transforms[0].__repr__(): # means we are in testing
sc_h, sc_w, _ = self.transform.transforms[0].get_new_shape()
tl_y, tl_x = self.transform.transforms[1].get_tl()
else:
sc_w = or_w
sc_h = or_h
tl_x = 0
tl_y = 0
# apply transforms to tracks
scale_x = sc_w / or_w
scale_y = sc_h / or_h
norm_val = [max_w, max_h, max_w, max_h]
if self.use_hands:
left_track *= [scale_x, scale_y]
left_track -= [tl_x, tl_y]
right_track *= [scale_x, scale_y]
right_track -= [tl_x, tl_y]
if is_flipped:
left_track[:, 0] = max_w - left_track[:, 0] # apply flipping on x axis
right_track[:, 0] = max_w - right_track[:, 0]
if self.vis_data:
left_track_vis = left_track
right_track_vis = right_track
left_track = left_track[::2]
right_track = right_track[::2]
# for the DSNT layer normalize to [-1, 1] for x and to [-1, 2] for y, which can get values greater than +1 when the hand is originally not detected
left_track = (left_track * 2 + 1) / norm_val[:2] - 1
right_track = (right_track * 2 + 1) / norm_val[2:] - 1
if self.use_gaze:
gaze_track *= [scale_x, scale_y]
gaze_track -= [tl_x, tl_y]
if is_flipped:
gaze_track[:, 0] = max_w - gaze_track[:, 0] # flip x axis
if self.vis_data:
gaze_track_vis = gaze_track
gaze_track = gaze_track[::2]
gaze_track = (gaze_track * 2 + 1) / norm_val[:2] - 1
# get the labels for the tasks
labels = list()
if self.usable_objectives[0]:
action_id = self.video_list[index].label_action
if self.mappings[0]:
action_id = self.mappings[0][action_id]
labels.append(action_id)
if self.usable_objectives[1]:
verb_id = self.video_list[index].label_verb
if self.mappings[1]:
verb_id = self.mappings[1][verb_id]
labels.append(verb_id)
if self.usable_objectives[2]:
noun_id = self.video_list[index].label_noun
if self.mappings[2]:
noun_id = self.mappings[2][noun_id]
labels.append(noun_id)
if self.extra_nouns:
extra_nouns = self.video_list[index].extra_nouns
if self.mappings[2]:
extra_nouns = [self.mappings[2][en] for en in extra_nouns]
for en in extra_nouns:
labels.append(en)
if self.use_gaze or self.use_hands:
labels = np.array(labels, dtype=np.float32)
else:
labels = np.array(labels, dtype=np.int64) # numpy array for pytorch dataloader compatibility
if self.use_gaze:
gaze_points = gaze_track.astype(np.float32).flatten()
labels = np.concatenate((labels, gaze_points))
if self.use_hands:
hand_points = np.concatenate((left_track[:, np.newaxis, :], right_track[:, np.newaxis, :]), axis=1).astype(np.float32)
hand_points = hand_points.flatten()
labels = np.concatenate((labels, hand_points))
if self.vis_data:
# for i in range(len(sampled_frames)):
# cv2.imshow('orig_img', sampled_frames[i])
# cv2.imshow('transform', clip_input[:, i, :, :].numpy().transpose(1, 2, 0))
# cv2.waitKey(0)
def vis_with_circle(img, left_point, right_point, winname):
k = cv2.circle(img.copy(), (int(left_point[0]), int(left_point[1])), 10, (255, 0, 0), 4) # blue is left
k = cv2.circle(k, (int(right_point[0]), int(right_point[1])), 10, (0, 0, 255), 4) # red is right
cv2.imshow(winname, k)
def vis_with_circle_gaze(img, gaze_point, winname):
k = cv2.circle(img.copy(), (int(gaze_point[0]), int(gaze_point[1])), 10, (0, 255, 0), 4) # green is gaze
cv2.imshow(winname, k)
if self.use_hands:
orig_left = np.array(hand_tracks['left'], dtype=np.float32)
orig_left = orig_left[sampled_idxs]
orig_right = np.array(hand_tracks['right'], dtype=np.float32)
orig_right = orig_right[sampled_idxs]
for i in range(len(sampled_frames)):
vis_with_circle(sampled_frames[i], orig_left[i], orig_right[i], 'hands no aug')
vis_with_circle(clip_input[:, i, :, :].numpy().transpose(1, 2, 0), left_track_vis[i], right_track_vis[i],
'hands transformed')
vis_with_circle(clip_input[:, i, :, :].numpy().transpose(1, 2, 0), orig_left[i], orig_right[i],
'hands trans. img not coords')
cv2.waitKey(0)
if self.use_gaze:
orig_gaze = np.array([[value[0], value[1]] for key, value in gaze_data.items()], dtype=np.float32)[sampled_idxs]
for i in range(len(sampled_frames)):
vis_with_circle_gaze(sampled_frames[i], orig_gaze[i]*self.norm_val[:2], 'gaze no aug')
vis_with_circle_gaze(clip_input[:, i, :, :].numpy().transpose(1, 2, 0), gaze_track_vis[i], 'gaze transformed')
vis_with_circle_gaze(clip_input[:,i, :, :].numpy().transpose(1, 2, 0), orig_gaze[i]*self.norm_val[:2], 'gaze trans. img not coords')
cv2.waitKey(0)
if self.validation:
return clip_input, labels, instance_name
elif self.gaze_evaluation:
orig_gaze = np.array([[value[0], value[1]] for key, value in gaze_data.items()], dtype=np.float32).flatten()
return clip_input, labels, orig_gaze, instance_name
else:
return clip_input, labels
from gulpio import GulpDirectory
class FromVideoDatasetLoaderGulp(torchDataset): #loads GTEA dataset from gulp
OBJECTIVE_NAMES = ['label_action', 'label_verb', 'label_noun']
def __init__(self, sampler, split_file, line_type, num_classes, max_num_classes, batch_transform=None,
extra_nouns=False, use_gaze=False, gaze_list_prefix=None, use_hands=False, hand_list_prefix=None,
validation=False, vis_data=False):
self.sampler = sampler
self.video_list = parse_samples_list(split_file, GTEADataLine) # if line_type=='GTEA' else DataLine)
self.extra_nouns = extra_nouns
self.usable_objectives = list()
self.mappings = list()
        for i, (objective, objective_name) in enumerate(zip(num_classes, FromVideoDatasetLoaderGulp.OBJECTIVE_NAMES)):
self.usable_objectives.append(objective > 0)
if objective != max_num_classes[i] and self.usable_objectives[-1]:
self.mappings.append(make_class_mapping_generic(self.video_list, objective_name))
else:
self.mappings.append(None)
assert any(obj is True for obj in self.usable_objectives)
self.transform = batch_transform
self.validation = validation
self.vis_data = vis_data
self.use_gaze = use_gaze
self.gaze_list_prefix = gaze_list_prefix
self.use_hands = use_hands
self.hand_list_prefix = hand_list_prefix
self.norm_val = [640., 480., 640., 480.]
# gulp_data_dir = r"D:\Datasets\egocentric\GTEA\gulp_output2"
gulp_data_dir = r"F:\workspace_George\GTEA\gteagulp"
self.gd = GulpDirectory(gulp_data_dir)
# self.items = list(self.gd.merged_meta_dict.items())
self.merged_data_dict = self.gd.merged_meta_dict
self.num_chunks = self.gd.num_chunks
self.data_path = gulp_data_dir
def __len__(self):
return len(self.video_list)
def __getitem__(self, index):
# item_id, item_info = self.items[index]
# assert item_id == self.video_list[index].data_path
path = self.video_list[index].data_path
item_info = self.merged_data_dict[path]
frame_count = len(item_info['frame_info'])
assert frame_count > 0
sampled_idxs = self.sampler.sampling(range_max=frame_count, v_id=index, start_frame=0)
# sampled_idxs = [10,11,13,14,15,15,15,15]
sampler_step = self.sampler.interval
produced_step = np.mean(sampled_idxs[1:] - np.roll(sampled_idxs,1)[1:])
if sampler_step[0] == produced_step:
sampled_frames, meta = self.gd[path, slice(sampled_idxs[0], sampled_idxs[-1]+1, sampler_step[0])]
else:
imgs, meta = self.gd[path]
assert sampled_idxs[-1] < len(imgs)
sampled_frames = []
for i in sampled_idxs:
sampled_frames.append(imgs[i])
clip_input = np.concatenate(sampled_frames, axis=2)
gaze_points = None
        if self.use_gaze:
            # gaze supervision is not implemented for the gulp loader; fail
            # loudly here instead of crashing later when labels are assembled
            raise NotImplementedError('use_gaze is not supported by FromVideoDatasetLoaderGulp')
hand_points = None
if self.use_hands: # almost the same process as VideoAndPointDatasetLoader
hand_track_path = os.path.join(self.hand_list_prefix, path + '.pkl')
hand_tracks = load_pickle(hand_track_path)
left_track = np.array(hand_tracks['left'], dtype=np.float32)
right_track = np.array(hand_tracks['right'], dtype=np.float32)
left_track = left_track[sampled_idxs] # keep the points for the sampled frames
right_track = right_track[sampled_idxs]
if not self.vis_data:
left_track = left_track[::2] # keep 1 coordinate pair for every two frames because we supervise 8 outputs from the temporal dim of mfnet and not 16 as the inputs
right_track = right_track[::2]
norm_val = self.norm_val
if self.transform is not None:
or_h, or_w, _ = clip_input.shape
clip_input = self.transform(clip_input) # have to put this line here for compatibility with the hand transform code
is_flipped = False
if 'RandomScale' in self.transform.transforms[
0].__repr__(): # means we are in training so get the transformations
sc_w, sc_h = self.transform.transforms[0].get_new_size()
tl_y, tl_x = self.transform.transforms[1].get_tl()
if 'RandomHorizontalFlip' in self.transform.transforms[2].__repr__():
is_flipped = self.transform.transforms[2].is_flipped()
elif 'Resize' in self.transform.transforms[0].__repr__(): # means we are in testing
sc_h, sc_w, _ = self.transform.transforms[0].get_new_shape()
tl_y, tl_x = self.transform.transforms[1].get_tl()
else:
sc_w = or_w
sc_h = or_h
tl_x = 0
tl_y = 0
# apply transforms to tracks
scale_x = sc_w / or_w
scale_y = sc_h / or_h
left_track *= [scale_x, scale_y]
left_track -= [tl_x, tl_y]
right_track *= [scale_x, scale_y]
right_track -= [tl_x, tl_y]
_, _, max_h, max_w = clip_input.shape
norm_val = [max_w, max_h, max_w, max_h]
if is_flipped:
left_track[:, 0] = max_w - left_track[:, 0]
right_track[:, 0] = max_w - right_track[:, 0]
if self.vis_data:
left_track_vis = left_track
right_track_vis = right_track
left_track = left_track[::2]
right_track = right_track[::2]
# for the DSNT layer normalize to [-1, 1] for x and to [-1, 2] for y, which can get values greater than +1 when the hand is originally not detected
left_track = (left_track * 2 + 1) / norm_val[:2] - 1
right_track = (right_track * 2 + 1) / norm_val[2:] - 1
hand_points = np.concatenate((left_track[:, np.newaxis, :], right_track[:, np.newaxis, :]), axis=1).astype(np.float32)
hand_points = hand_points.flatten()
# apply transforms on the video clip
if self.transform is not None and not (self.use_hands or self.use_gaze):
clip_input = self.transform(clip_input)
# get the labels for the tasks
labels = list()
if self.usable_objectives[0]:
action_id = self.video_list[index].label_action
if self.mappings[0]:
action_id = self.mappings[0][action_id]
labels.append(action_id)
if self.usable_objectives[1]:
verb_id = self.video_list[index].label_verb
if self.mappings[1]:
verb_id = self.mappings[1][verb_id]
labels.append(verb_id)
if self.usable_objectives[2]:
noun_id = self.video_list[index].label_noun
if self.mappings[2]:
noun_id = self.mappings[2][noun_id]
labels.append(noun_id)
if self.extra_nouns:
extra_nouns = self.video_list[index].extra_nouns
if self.mappings[2]:
extra_nouns = [self.mappings[2][en] for en in extra_nouns]
for en in extra_nouns:
labels.append(en)
if self.use_gaze or self.use_hands:
labels = np.array(labels, dtype=np.float32)
else:
labels = np.array(labels, dtype=np.int64) # numpy array for pytorch dataloader compatibility
if self.use_gaze:
labels = np.concatenate((labels, gaze_points))
if self.use_hands:
labels = np.concatenate((labels, hand_points))
if self.vis_data:
# for i in range(len(sampled_frames)):
# cv2.imshow('orig_img', sampled_frames[i])
# cv2.imshow('transform', clip_input[:, i, :, :].numpy().transpose(1, 2, 0))
# cv2.waitKey(0)
def vis_with_circle(img, left_point, right_point, winname):
k = cv2.circle(img.copy(), (int(left_point[0]), int(left_point[1])), 10, (255, 0, 0), 4)
k = cv2.circle(k, (int(right_point[0]), int(right_point[1])), 10, (0, 0, 255), 4)
cv2.imshow(winname, k)
orig_left = np.array(hand_tracks['left'], dtype=np.float32)
orig_left = orig_left[sampled_idxs]
orig_right = np.array(hand_tracks['right'], dtype=np.float32)
orig_right = orig_right[sampled_idxs]
for i in range(len(sampled_frames)):
vis_with_circle(sampled_frames[i], orig_left[i], orig_right[i], 'no augmentation')
vis_with_circle(clip_input[:, i, :, :].numpy().transpose(1, 2, 0), left_track_vis[i], right_track_vis[i],
'transformed')
vis_with_circle(clip_input[:, i, :, :].numpy().transpose(1, 2, 0), orig_left[i], orig_right[i],
'transf_img_not_coords')
cv2.waitKey(0)
if not self.validation:
return clip_input, labels
else:
return clip_input, labels, self.video_list[index].data_path
class FromVideoDatasetLoader(torchDataset):  # loads the GTEA dataset from video files; deprecated, no longer used
OBJECTIVE_NAMES = ['label_action', 'label_verb', 'label_noun']
def __init__(self, sampler, split_file, line_type, num_classes, max_num_classes, batch_transform=None, extra_nouns=False,
validation=False, vis_data=False):
self.sampler = sampler
self.video_list = parse_samples_list(split_file, GTEADataLine) # if line_type=='GTEA' else DataLine)
self.extra_nouns = extra_nouns
# num_classes is a list with 3 integers.
# num_classes[0] = num_actions,
# num_classes[1] = num_verbs,
# num_classes[2] = num_nouns
# if any of these has the value <= 0 then this objective will not be used in the network
# if any of these has value different than its respective on max_num_classes then I perform class mapping
# max_num_classes is a list with 3 integers which define the maximum number of classes for the objective and is
        # fixed for a given dataset, e.g. [0, 125, 322] for EPIC and [106, 19, 53] for GTEA
self.usable_objectives = list()
self.mappings = list()
for i, (objective, objective_name) in enumerate(zip(num_classes, FromVideoDatasetLoader.OBJECTIVE_NAMES)):
self.usable_objectives.append(objective > 0)
if objective != max_num_classes[i] and self.usable_objectives[-1]:
self.mappings.append(make_class_mapping_generic(self.video_list, objective_name))
else:
self.mappings.append(None)
assert any(obj is True for obj in self.usable_objectives)
self.transform = batch_transform
self.validation = validation
self.vis_data = vis_data
def __len__(self):
return len(self.video_list)
def __getitem__(self, index):
sampled_frames = []
try:
with Video(vid_path=self.video_list[index].get_video_path(prefix='gtea_clips')) as vid:
start_frame = 0
frame_count = vid.count_frames(check_validity=False)
sampled_idxs = self.sampler.sampling(range_max=frame_count, v_id=index, start_frame=start_frame)
sampled_frames = vid.extract_frames(idxs=sampled_idxs, force_color=True)
except IOError as e:
print(">> I/O error({0}): {1}".format(e.errno, e.strerror))
        clip_input = np.concatenate(sampled_frames, axis=2)
# USAGE
# python mnist_dbn.py
# import the necessary packages
from __future__ import print_function
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from DataAndDescription.utils import dataset
from sklearn import datasets
from nolearn.dbn import DBN
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import cv2
import numpy as np
def plot_error(epochs, errors_fine_tune):
plt.plot(epochs, errors_fine_tune, '-', linewidth=2.0, label='error')
plt.xlabel('Epochs (Number of times data is shown to the Network)')
plt.ylabel('Error')
plt.legend()
plt.title('Decline in Error during training')
plt.xlim(np.min(epochs) - 0.003, np.max(epochs) + 0.003)
    plt.ylim(np.min(errors_fine_tune) - 0.003, np.max(errors_fine_tune) + 0.003)
    plt.show()
#! /usr/bin/env python
# -*- coding:utf-8 -*-
"""Generate SN Ia toy models for Weizmann workshop code-comparison study
(Radiation Transfer and Explosive Thermonuclear Burning in Supernovae,
17-28 June 2018)
The model is defined by its total mass (--mtot) and asymptotic kinetic
energy (--ekin; alternatively it can be determined given the
composition based on Eq. 1 of W07). The density profile can either be
exponential (--densprof expon) or consist of a broken power law with
indices delta,n (--densprof power --densexp <delta>,<n>; see CS89,
K10).
The ejecta is divided into N zones with constant velocity width
(--dvel). The mass of each zone is computed given the zone volume
(radii determined from velocity assuming homologous expansion) and
density profile. Starting from the central zone, we keep adding mass
shells until the ejecta mass reaches 99.99% of the total mass.
The ejecta is supposed to consist of four distinct chemical zones: the
innermost zone consists of stable IGEs (mass set using --mige; 100% Fe
unless --xfracni is set to the relative fraction of stable Ni); then
comes the 56Ni zone (mass at t=0 set using --mni56); then the IME zone
(mass set using --mime; the IMEs to include are specified using --ime
and their relative fraction with --xfracime). Note that some trace
amount of Ti can be included in the 56Ni and IME zones with --xfracti
(we simply replace xfracti of the 56Ni and IME masses with
Ti). Finally, any remaining outermost layer is set to unburnt C/O (the
relative fraction of O is set using --xfraco). The ejecta must contain
some 56Ni and IMEs, but does not necessarily have to include stable
IGEs or unburnt C/O.
| || || || |
| stable IGEs || 56Ni || IMEs || unburnt C/O |
| (optional) || (+Ti) || (+Ti) || (optional) |
mass = 0.............................................mtot
The abundance profiles are connected using an analytical function
(--transprof) over a given mass range (--dmige for stable IGE -> 56Ni
connection; --dmni56 for 56Ni -> IME connection; --dmime for IME ->
unburnt C/O connection). Note that one can set dmige = dmni56 = dmime
using the --dmtrans option. The transition profile can either be a
linear function (--transprof linear), an inverse-exponential (aka
'logistic') function with an associated scale factor(--transprof
invexpon --transscl <scale factor>; see M18), or a cosine bell
(--transprof cosine).
The ejecta is evolved to a time (--tend) by solving the first law of
thermodynamics assuming a radiation-dominated gas, local energy
deposition from 56Ni decay, and no diffusion (i.e. the temperature in
each zone is solved independently from adjacent zones). Given these
assumptions, the final temperature can be determined analytically by
noting that the time-weighted internal energy (=t*E(t)) equals the
time-integrated time-weighted decay energy deposition rate
(=Int{t*Q(t) dt}), as noted by K13 (we ignore the time-weighted
internal energy shortly after explosion E(t0)*t0 << Int{Q(t) t dt}). A
minimum temperature can be set using --tempmin.
Last, an output file is generated (--fout) and the density/abundance
profiles are displayed (unless --noplot is set).
Parameters
----------
Typing:
python mk_snia_toy_model.py -h
will print the usage and input parameters (with their default values))
Examples
--------
1) ejecta with default settings (see python mk_snia_toy_model.py -h):
python mk_snia_toy_model.py
2) same as 1) but with broken power-law density profile
python mk_snia_toy_model.py --densprof power --densexp 0,10
3) 1.4 Msun ejecta (default) with Ekin computed based on composition,
consisting of 0.1 Msun stable IGEs (default), 0.6 Msun 56Ni
(default), 0.6 Msun IMEs (Mg, Si, S, Ca, all with default relative
mass fractions), and hence 0.1 Msun unburnt C/O in equal mass
fractions (default), connected over a mass range 0.1 Msun
(default) using a cosine bell:
python mk_snia_toy_model.py --ekinw07 --transprof cosine
4) 1.0 Msun ejecta with Ekin=10^51 erg (default) consisting only of
56Ni (0.5 Msun) and Si (0.5 Msun), connected over a mass range 0.1
Msun (default):
python mk_snia_toy_model.py --mtot 1.0 --mni56 0.5 --mime 0.5 --ime si
References
----------
CS89: Chevalier & Soker (1989), ApJ, 341, 867
J99: Jeffery (1999) arXiv:astro-ph/9907015
K10: Kasen (2010), ApJ, 708, 1025
K13: Katz et al. (2013), arXiv:1301.6766 [astro-ph]
M18: Magee et al. (2018), arXiv:1803.04436v1
W07: Woosley et al. (2007), ApJ, 662, 487
TODO
----
- define grid based on delta_mass as opposed to delta_vel
- adjust delta_vel (increase resolution) in composition transition zones
Revision history
----------------
27 Mar 2018 - first version of code (<NAME>, SB)
29 Mar 2018 - revised version (Boaz Katz, BK)
o replaced temperature iteration with analytical calculation
(see Katz et al. 2013), and removed references to an initial
time t0 (ejecta evolved to final time T_END directly)
o use a finer grid (in mass coordinates) for abundance profile
calculations (change_mass_res() function)
o correction to average density in transition region + special
treatment of cell containing the break for broken power-law
density profile
o added values of various constants to output file
o added new columns (X_IGE0 (at t=0), X_56Ni0, X_IME, X_CO) to
output file and rearranged columns to first display parameters
that do not depend on the final time
03 Apr 2018 - revised version for testing by workshop participants (SB)
o code clean-up and added references to radioactive data
05 Apr 2018 - revised version (SB, per <NAME>' suggestions)
o added Python2/3 compatibility
o removed unused variables for temperature iteration
15 May 2018 - revised version (SB)
o added option to include some Ti in 56Ni & IME zones (--xfracti)
o report actual abundances in output file header in addition to requested ones
o version date stamp
o rearrange IMEs order in output file by decreasing atomic mass
20 May 2018 - revised version (SB)
o added nzones and Vmax to output file header
07 Jun 2018 - revised version (SB & BK)
o corrected bug in minxfrac option
o implemented calculation of gamma-ray escape time t0 from J99 (BK)
Author contact
--------------
<NAME>, <EMAIL>
"""
import sys
import os
import re
import numpy as np
### version number
VERSION = '2018-06-07'
### ensure Python2 (2.6 or 2.7) and Python3 compatibility
if sys.version_info.major == 2:
input = raw_input # input() to mean raw_input() when running Python2
### constants
# (astro)physical constants
AMU = 1.660540e-24 # atomic mass unit (g)
ARAD = 7.5659125e-15 # radiation constant [erg/cm^3/K^4]
MSUN = 1.989e+33 # solar mass (g)
# 56Ni decay
EDECAY_56NI = 1.7206 # energy per 56Ni decay (MeV) - obtained by summing photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
EDECAY_56CO = 3.6072 # energy per 56Co decay (MeV) - obtained by summing photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
MASS_56NI = 55.94212855 # mass of 56Ni nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Ni&isotype=all
MASS_56CO = 55.93983880 # mass of 56Co nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Co&isotype=all
THALF_56NI = 6.075 # 56Ni half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds
THALF_56CO = 77.236 # 56Co half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds
KAPPA_GAMMA = 0.025 # effective gamma-ray opacity (cm^2/g) for calculating the gamma-ray escape time in optically thin limit only, assuming mue=0.5 from J99
# conversion factors
DAY2SEC = 86400.0 # days -> sec conversion
MEV2ERG = 1.60217733e-6 # MeV -> erg conversion factor
# misc
EPSILON = 1e-5 # smallish number
MAXFRAC_TI = 1e-4 # maximum value for Ti fraction in 56Ni and IME zones
MAXMINXFRAC = 1e-5 # ensure --minxfrac option doesn't exceed this value
### defaults
MTOT_INIT = 1.40 # total mass (msun)
EKIN_INIT = 1.00 # asymptotic kinetic energy (1e51 erg)
DVEL_INIT = 100.0 # cell size (km/s)
DENSPROF_INIT = 'expon' # "density profile: 'expon' (exponential) or 'power' (broken power-law)
DENSEXP_INIT = '0,10' # exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10
MIGE_INIT = 0.1 # stable IGE mass (msun)
MNI56_INIT = 0.6 # 56Ni mass at t=0 (msun)
MIME_INIT = 0.6 # IME mass (msun)
DMIGE_INIT = 0.1 # mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)
DMNI56_INIT = 0.1 # mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)
DMIME_INIT = 0.1 # mass interval over which IME mass fraction transitions from 1 to 0 (msun)
DMFINE_INIT = 1e-4 # resolution of fine grid of masses used for transitions (msun)
TRANSPROF_INIT = 'linear' # transition profile for mass fraction variation from 1 to 0: 'linear', 'invexpon' (inverse exponential) or 'cosine' (cosine bell)
TRANSSCL_INIT = 1.4e2 # scale factor for 'invexpon' (inverse exponential) transition profile; this default value of 140 ensures X>0.999 at the lower boundary
XIGEFRAC_NI = 0.1 # fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe
XCOFRAC_O = 0.5 # fraction of unburnt C/O mass as O; the rest gets set to C
XFRACTI_INIT = 0.0 # fraction of mass in 56Ni and IME zones set to Ti
T_END = 1.0 # final time for toy model (days)
TEMP_MIN = 1e3 # minimum allowed temperature (K)
FOUT_INIT = 'snia_toy.dat' # output file name
### which IMEs to consider
#
# NOTE: can be modified but ensure Sum(XFRACIME_INIT)=1.0
# (if only one IME is given then --xfracime is set to 1.0 automatically)
#
# in model DDC10 from <NAME>:
#
# M(Ca+S+Si+Mg) = 0.466 Msun
# M(Ca) / M(Ca+S+Si+Mg) ~ 0.087
# M(S) / M(Ca+S+Si+Mg) ~ 0.351
# M(Si) / M(Ca+S+Si+Mg) ~ 0.542
# M(Mg) / M(Ca+S+Si+Mg) ~ 0.020
#
IME_INIT = 'ca,s,si,mg' # comma-separated list of IMEs to include
XFRACIME_INIT = '0.087,0.351,0.542,0.020' # comma-separated list of relative IME fractions
###############################################################################
def change_mass_res(dm_oldres, x_oldres, dm_newres):
"""for mass grid with cell masses dm_oldres, and abundances
x_oldres, find abundances at new resolution grid with cell masses
dm_newres
"""
x_newres = dm_newres * 0.0
l_new = 0
l_old = 0
Nnew = len(dm_newres)
Nold = len(dm_oldres)
mold = dm_oldres[l_old]
mnew = dm_newres[l_new]
mxaccum = 0.0
while (l_new < Nnew) and (l_old < Nold):
if mnew <= mold:
mxaccum += mnew * x_oldres[l_old]
mold -= mnew
x_newres[l_new] = mxaccum / dm_newres[l_new]
mxaccum = 0.0
l_new += 1
if l_new < Nnew:
mnew = dm_newres[l_new]
else:
mxaccum += mold * x_oldres[l_old]
mnew -= mold
l_old += 1
if l_old < Nold:
mold = dm_oldres[l_old]
if l_new < Nnew:
x_newres[l_new] = mxaccum / dm_newres[l_new]
return x_newres
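# Worked example (illustrative): rebinning a two-cell abundance profile onto
# four equal-mass cells conserves the species mass exactly,
#
#   dm_old = np.array([0.5, 0.5]); x_old = np.array([1.0, 0.0])
#   change_mass_res(dm_old, x_old, np.full(4, 0.25))   # -> [1., 1., 0., 0.]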
###############################################################################
def shell_column_density(r_rshell):
"""the correction factor f for the average column density through
a spherical shell at rshell, as seen by a spherical shell at r
the column density is f*mshell/(4*pi*rshell^2). For r->0 f->1.
"""
x = r_rshell * 1.0
y = x / np.sqrt(np.abs(1 - x**2))
ansx = x * 0.0
ansx[x>1] = np.log(2.0 * (np.sqrt(y[x>1]**2 - 1) + y[x>1])) - np.log(2)
ansx[x<1] = (np.arcsinh(y[x<1]) - np.arcsinh(-y[x<1])) / 2.0
ans = ansx / x
return ans
###############################################################################
def total_column_density_cgs(v_edge, m_cell, XNi56):
""" calculate the total, ni56(t=0) weighted, angle averaged,
column density (multiplied by t^2 so constant)
*****NOTE***** that v_edge, m_cell, XNi56 are in cgs
"""
mNi56_cell = m_cell * XNi56
N_cell = len(m_cell)
def cell_to_edge(a_cell):
a_edge = a_cell * 1.0
a_edge[-1] = a_cell[-1] / 2.0
a_edge[:-1] = (a_cell[:-1] + a_cell[1:]) / 2.0
return a_edge
def edge_to_mid(a_edge):
a_mid = a_edge * 1.0
a_mid[0] = a_edge[0] / 2.0
a_mid[1:] = (a_edge[:-1] + a_edge[1:]) / 2.0
return a_mid
v_mid = edge_to_mid(v_edge)
m_edge = cell_to_edge(m_cell)
SigV_edge = m_edge / (4 * np.pi * v_edge**2)
SigV_ave_cell = m_cell * 0.0
for lcell in range(N_cell):
SigV_ave_cell[lcell] = np.sum(SigV_edge * shell_column_density(v_mid[lcell] / v_edge))
SigV_tot = np.sum(SigV_ave_cell * mNi56_cell) / np.sum(mNi56_cell)
return SigV_tot
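# Illustrative follow-up (a sketch of the J99 optically thin limit): since
# SigV_tot is the 56Ni-weighted column density times t^2, the gamma-ray escape
# time follows as
#
#   t0 = np.sqrt(KAPPA_GAMMA * SigV_tot)   # seconds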
###############################################################################
if __name__ == '__main__':
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#
# options
#
parser.add_argument('--mtot', default=MTOT_INIT, type=float, help='total mass (msun)')
parser.add_argument('--ekin', default=EKIN_INIT, type=float, help='asymptotic Ekin (1e51 erg)')
parser.add_argument('--ekinw07', action='store_true', help='compute Ekin based on W07, Eq. 1')
parser.add_argument('--dvel', default=DVEL_INIT, type=float, help='cell size (km/s)')
parser.add_argument('--densprof', default=DENSPROF_INIT, type=str, choices=['expon','power'], help="density profile: 'expon' (exponential) or 'power' (broken power-law)")
parser.add_argument('--densexp', default=DENSEXP_INIT, type=str, help='exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10')
parser.add_argument('--tend', default=T_END, type=float, help='final time for toy model (d)')
parser.add_argument('--tempmin', default=TEMP_MIN, type=float, help='minimum allowed temperature (K)')
parser.add_argument('--mige', default=MIGE_INIT, type=float, help='stable IGE mass (msun)')
parser.add_argument('--mni56', default=MNI56_INIT, type=float, help='56Ni mass at t=0 (msun)')
parser.add_argument('--mime', default=MIME_INIT, type=float, help='IME mass (msun)')
parser.add_argument('--dmige', default=DMIGE_INIT, type=float, help='mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)')
parser.add_argument('--dmni56', default=DMNI56_INIT, type=float, help='mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)')
parser.add_argument('--dmime', default=DMIME_INIT, type=float, help='mass interval over which IME mass fraction transitions from 1 to 0 (msun)')
parser.add_argument('--dmtrans', default=None, type=float, help='to set dmige=dmni56=dmime=dmtrans in one go')
parser.add_argument('--dmfine', default=DMFINE_INIT, type=float, help='resolution of fine grid of masses for transitions (msun)')
parser.add_argument('--transprof', default=TRANSPROF_INIT, type=str, choices=['linear', 'invexpon','cosine'], help="transition profile for mass fraction variation from 1 to 0: 'linear', 'invexpon' (inverse exponential) or 'cosine' (cosine bell)")
parser.add_argument('--transscl', default=TRANSSCL_INIT, type=float, help="scale factor for 'invexpon' (inverse exponential) transition profile")
parser.add_argument('--xfracni', default=XIGEFRAC_NI, type=float, help='fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe')
parser.add_argument('--xfraco', default=XCOFRAC_O, type=float, help='fraction of unburnt C/O mass as O; the rest gets set to C')
parser.add_argument('--xfracti', default=XFRACTI_INIT, type=float, help='fraction of mass in 56Ni and IME zones set to Ti')
parser.add_argument('--ime', default=IME_INIT, type=str, help='comma-separated list of IMEs to include')
parser.add_argument('--xfracime', default=XFRACIME_INIT, type=str, help='comma-separated list of relative IME fractions')
parser.add_argument('--minxfrac', default=None, type=float, help='minimum mass fraction for output to file/plot')
parser.add_argument('--fout', default=FOUT_INIT, type=str, help='output file name')
parser.add_argument('--noplot', action='store_true', help='disable plotting of density/abundance profiles')
parser.add_argument('--nowarn', action='store_true', help='disable warning messages')
parser.add_argument('--debug', action='store_true', help='print various stuff for debugging')
parser.add_argument('--test', action='store_true', help='for testing purposes')
args = parser.parse_args()
print('')
print('#############################')
print(' SN Ia toy model' )
print('#############################')
#
# check masses make sense
#
mtot = args.mtot
mige = args.mige
mni56 = args.mni56
mime = args.mime
if (1.0 - (mni56 + mime)/mtot) < EPSILON and mige > EPSILON:
print('')
print('WARNING - 56Ni mass + IME mass = total mass; setting IGE mass to 0')
mige = 0.0
mburnt = mige + mni56 + mime
if mburnt > mtot:
sys.exit("ERROR - burnt mass exceeds total mass! mtot, mburnt = {:.3f}, {:.3f} Msun".format(mtot, mburnt))
elif mni56 < EPSILON:
sys.exit("ERROR - 56Ni mass must be > 0! mni56 = {:.3f} Msun".format(mni56))
elif mime < EPSILON:
sys.exit("ERROR - IME mass must be > 0! mime = {:.3f} Msun".format(mime))
else:
munbco = mtot - mburnt # unburnt mass
#
# check IMEs
#
imes = args.ime.split(',')
nime = len(imes)
for ii, ime in enumerate(imes):
if ime not in IME_INIT:
sys.exit("ERROR - IME {:s} not in default IME_INIT: {:s}".format(ime, IME_INIT))
if nime == 1:
xfracimestr = ['1.0']
xfracime = [1.0]
else:
xfracimestr = args.xfracime.split(',')[:nime]
xfracime = [float(xx) for xx in xfracimestr]
xfracimetot = sum(xfracime)
if np.abs(1.0 - 1.0/xfracimetot) > EPSILON:
sys.exit("ERROR - relative IME mass fractions don't sum up to 1! sum(xfracime) = {:.5f}".format(xfracimetot))
#
# check Ti fraction
#
xfracti = args.xfracti
if (xfracti > MAXFRAC_TI):
sys.exit("ERROR - xfracti ({:.4e}) cannot exceed MAXFRAC_TI ({:.4e})".format(xfracti, MAXFRAC_TI))
else:
mti_ni56 = xfracti * mni56 # Ti mass in 56Ni zone
mti_ime = xfracti * mime # Ti mass in IME zone
mti = mti_ni56 + mti_ime
print('')
print('INFO - user-defined ejecta mass and composition:')
print('')
print(' Mtot = {:.4e} Msun'.format(mtot))
print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(mige, (1.0-args.xfracni)*1e2, args.xfracni*1e2))
print(' M(56Ni) = {:.4e} Msun'.format(mni56))
sys.stdout.write(' M(IME) = {:.4e} Msun of which'.format(mime))
for ii, ime in enumerate(imes):
sys.stdout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize()))
if ii == nime-1:
print('')
else:
if ii == nime-2:
sys.stdout.write(' and')
else:
sys.stdout.write(',')
print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(munbco, (1.0-args.xfraco)*1e2, args.xfraco*1e2))
if (xfracti > 0.0):
print('')
print(' NOTE: will replace {:.4e} Msun of 56Ni mass and {:.4e} Msun of IME mass with Ti'.format(mti_ni56, mti_ime))
#
# check mass intervals dmX
#
if args.dmtrans is not None:
dmige = args.dmtrans
dmni56 = args.dmtrans
dmime = args.dmtrans
else:
dmige = args.dmige
dmni56 = args.dmni56
dmime = args.dmime
# if there are no IGEs or unburnt C/O, set IGE or IME mass intervals to 0
if mige < EPSILON:
mige = 0.0
dmige = 0.0
if munbco < EPSILON:
munbco = 0.0
dmime = 0.0
# requirements on IGE/56Ni/IME/CO mass given mass intervals
if mige < 0.5*dmige:
sys.exit("ERROR - Need to increase IGE mass or decrease dM(IGE) as M(IGE) < dM(IGE)/2! mime, dmige = {:.3f}, {:.3f} Msun".format(mige, dmige))
if mni56 < 0.5*(dmige+dmni56):
sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(IGE)+dM(56Ni) as M(56Ni) < [dM(IGE)+dM(56Ni)]/2! mni56, dmige, dmni56 = {:.3f}, {:.3f}, {:.3f} Msun".format(mni56, dmige, dmni56))
if mime < 0.5*(dmni56+dmime):
sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(56Ni)+dM(IME) as M(56Ni) < [dM(56Ni)+dM(IME)]/2! mime, dmni56, dmime = {:.3f}, {:.3f}, {:.3f} Msun".format(mime, dmni56, dmime))
if munbco < 0.5*dmime:
sys.exit("ERROR - Need to increase unburnt C/O mass or decrease dM(IME) as M(C/O) < dM(IME)/2! munbco, dmime = {:.3f}, {:.3f} Msun".format(munbco, dmime))
# compute mass coordinate at which mass fraction starts decreasing from 1
mcoord_ige = mige - 0.5*dmige # IGE mass fraction starts decreasing from 1 at this mass coordinate (unless M(IGE)=0!)
mcoord_ni56 = mcoord_ige + mni56 + 0.5*(dmige-dmni56) # 56Ni mass fraction starts decreasing from 1 at this mass coordinate
mcoord_ime = mcoord_ni56 + mime + 0.5*(dmni56-dmime) # IME mass fraction starts decreasing from 1 at this mass coordinate
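# NOTE: for a transition profile that is symmetric about the interval
# midpoint (exactly true for 'linear' and 'cosine', approximately for
# 'invexpon'), half of each transition interval's mass belongs to the inner
# species and half to the outer one, so the dmX/2 offsets above preserve
# the requested integrated masses M(IGE), M(56Ni) and M(IME)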
if args.debug:
print('mcoord_ige, mcoord_ni56, mcoord_ime = {:.3f} {:.3f} {:.3f}'.format(mcoord_ige, mcoord_ni56, mcoord_ime))
#
# compute Ekin based on W07, Eq. 1 if --ekinw07 is set
#
# Ekin = 1.56 M(Ni) + 1.74 M(Fe) + 1.24 M(IME) - Eg + Eint
#
# (units=1e51 erg for Ekin, Eg, Eint; Msun for masses)
#
# NOTE: Eg and Eint correspond to MCh ejecta, so a warning is
# issued if the requested total mass differs significantly from MCh
if args.ekinw07:
if np.abs(mtot-1.4) > 0.1:
print('')
print("WARNING - total mass differs significantly from MCh")
zzz = input(" ===> apply Eq. 1 of W07 to determine Ekin anyway? [y/n] (default=n): ")
if zzz != 'y':
sys.exit("ERROR - exiting mk_snia_toy_model.py; adjust mtot or remove --ekinw07 option")
ebind = 3.35 # gravitational binding energy for MCh WD from W07 (1e51 erg)
eint = 2.89 # internal energy of MCh WD from W07 (1e51 erg)
ekin = 1.56 * mni56 + 1.74 * mige + 1.24 * mime - ebind + eint
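# worked example (illustrative masses): mni56=0.6, mige=0.1, mime=0.6 Msun
# => ekin = 1.56*0.6 + 1.74*0.1 + 1.24*0.6 - 3.35 + 2.89 = 1.394 (1e51 erg)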
print('')
print('INFO - computed Ekin based on W07 = {:.4e} erg'.format(ekin*1e51))
else:
ekin = args.ekin
print('')
print('INFO - input Ekin = {:.4e} erg'.format(ekin*1e51))
#
# generate density profile at T_END
#
# NOTE: dens and vel are zone-centered
#
vel = [] # velocity coordinate in km/s
rad = [] # radial coordinate in cm
dens = [] # density in g/cm^3
dmass = [] # shell mass in Msun
# ejecta are evolved to final time T_END (days)
tend = args.tend
tend_sec = tend * DAY2SEC
# set innermost shell properties
dvel = args.dvel # cell size in km/s
v0 = 0.0 ; r0 = v0 * tend_sec * 1e5
v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5
vcen = 0.5*(v0+v1)
rcen = 0.5*(r0+r1)
if args.densprof == 'expon':
print('')
print('INFO - using exponential density profile')
# compute e-folding velocity for density profile (see J99, line after Eq. A6)
# ve = sqrt(Ekin / 6Mtot) (units=cgs)
ve_cgs = np.sqrt(ekin*1e51 / (6*mtot*MSUN))
ve = ve_cgs * 1e-5 # cm/s -> km/s
print(' computed e-folding velocity based on J99 = {:.0f} km/s'.format(ve))
# compute central density at T_END (see J99, Eq. A7)
# rho_c,0 = Mtot / (8 PI ve^3 t^3) (units=cgs)
rhoc0 = mtot * MSUN / (8 * np.pi * ve_cgs**3 * tend_sec**3)
print(' computed central density based on J99 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend))
# compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
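# for rho = rhoc0*exp(-z) with z = v/ve, the shell mass integral uses
# Int z^2 exp(-z) dz = -(z^2 + 2z + 2) exp(-z), and the shell volume
# scales as (z1^3 - z0^3)/3, which gives the closed-form rhoave below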
z0 = v0/ve
z1 = v1/ve
zcen = 0.5*(z0+z1)
rhocen = rhoc0 * np.exp(-zcen)
rhoave = rhoc0 * 3.0 * (np.exp(-z0)*(z0**2+2.0*z0+2.0) - np.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3)
elif args.densprof == 'power':
densexp = args.densexp.split(',')
exp_delta, exp_n = int(densexp[0]), int(densexp[1])
print('')
print('INFO - using broken power-law density profile with delta, n = {:d}, {:d}'.format(exp_delta, exp_n))
if exp_delta >= 3 or exp_n <= 3:
sys.exit("ERROR - we must have delta < 3 and n > 3 for broken power-law density profile! delta, n = {:d}, {:d}".format(exp_delta, exp_n))
# compute transition velocity for broken power-law density profile
fac3 = (1.0/(3.0-exp_delta) + 1.0/(exp_n-3.0))
fac5 = (1.0/(5.0-exp_delta) + 1.0/(exp_n-5.0))
fac = fac3 / fac5
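# derivation: Mtot = 4 PI rhoc0 (vt t)^3 fac3 and Ekin = 2 PI rhoc0 vt^5 t^3 fac5,
# so Ekin/Mtot = vt^2 fac5 / (2 fac3), i.e. vt = sqrt(2 (fac3/fac5) Ekin/Mtot)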
vt_cgs = np.sqrt(fac*2.0*ekin*1e51 / (mtot*MSUN))
vt = vt_cgs * 1e-5 # cm/s -> km/s
print(' computed transition velocity based on K10 = {:.0f} km/s'.format(vt))
# compute central density at T_END
rhoc0 = mtot*MSUN / (4 * np.pi * vt_cgs**3 * tend_sec**3) / fac3
print(' computed central density based on K10 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend))
# compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
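# shell mass integral for rho = rhoc0*(v/vt)^-delta:
# Int v^2 (v/vt)^-delta dv = v^(3-delta) / ((3-delta) * vt^-delta)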
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3)
else:
sys.exit("ERROR - unknown density profile: {:s}!".format(args.densprof))
if args.debug:
rhodiff = 1.0 - rhocen/rhoave
print('rhoave, rhocen, diff = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodiff))
dvol = 4./3.*np.pi*(r1**3 - r0**3)
dm = dvol * rhoave / MSUN # to be consistent with mean density
vel.append(vcen) # velocity at zone center
rad.append(rcen) # radius at zone center
dens.append(rhoave) # mean density over [v0,v1]
dmass.append(dm) # mass in zone = Int(rho dV)
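# build shells of constant width dvel outward until 99.99% of the total
# mass is enclosed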
while (1.0-sum(dmass)/mtot) > 1e-4:
v0 += dvel ; r0 = v0 * tend_sec * 1e5
v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5
vcen = 0.5*(v0+v1)
rcen = 0.5*(r0+r1)
# compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V)
if args.densprof == 'expon':
z0 = v0/ve
z1 = v1/ve
zcen = 0.5*(z0+z1)
rhocen = rhoc0 * np.exp(-zcen)
rhoave = rhoc0 * 3.0 * (np.exp(-z0)*(z0**2+2.0*z0+2.0) - np.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3)
elif args.densprof == 'power':
if v1 <= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3)
elif v0 >= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_n)
rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_n) - v0**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n)) / (v1**3 - v0**3)
else:
# special treatment for cell that contains the break
if vcen <= vt:
rhocen = rhoc0 * (vcen/vt)**(-exp_delta)
else:
rhocen = rhoc0 * (vcen/vt)**(-exp_n)
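# split the shell mass integral at the break velocity vt: numer0 covers
# the shallow (delta) part over [v0,vt], numer1 the steep (n) part over [vt,v1]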
numer0 = (vt**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta))
numer1 = (v1**(3.0-exp_n) - vt**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n))
rhoave = rhoc0 * 3.0 * (numer0 + numer1) / (v1**3 - v0**3)
if args.debug:
rhodiff = 1.0 - rhocen/rhoave
print('rhoave, rhocen, diff = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodiff))
dvol = 4./3.*np.pi*(r1**3 - r0**3)
dm = dvol * rhoave / MSUN # to be consistent with mean density
vel.append(vcen) # velocity at zone center
rad.append(rcen) # radius at zone center
dens.append(rhoave) # mean density over [v0,v1]
dmass.append(dm) # mass in zone = Int(rho dV)
# convert lists to arrays
vel = np.array(vel)
rad = np.array(rad)
dens = np.array(dens)
dmass = np.array(dmass)
nd = vel.size # number of zones
if args.debug:
print('nd = ',nd)
# Lagrangian mass coordinate (corresponds to outer zone boundary)
mass = np.cumsum(dmass)
#
# set abundances for stable IGEs, 56Ni, IMEs, unburnt C/O
#
if dmige+dmni56+dmime > EPSILON:
print('')
print('INFO - connecting abundance profiles with {:s} function'.format(args.transprof))
print('')
if mige > EPSILON and dmige > EPSILON:
print(' stable IGE -> 56Ni zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ige, mcoord_ige+dmige))
if dmni56 > EPSILON:
print(' 56Ni -> IME zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ni56, mcoord_ni56+dmni56))
if munbco > EPSILON and dmime > EPSILON:
print(' IME -> unburnt C/O zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ime, mcoord_ime+dmime))
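# transition profiles over [m0, m0+dm], with x going from 1 to 0:
# linear : x = 1 - (m - m0)/dm
# invexpon: x = 1 / (1 + exp(s*(m - m0 - dm/2))) with s = transscl (logistic)
# cosine : x = (1 + cos(PI*(m - m0)/dm)) / 2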
# first calculate the abundance profiles on a high resolution grid of masses
dmfine = args.dmfine
mass_fine = np.arange(dmfine, mass[-1]+dmfine, dmfine)
N_fine = len(mass_fine)
dm_fine = np.ones(N_fine)*dmfine
xige_fine = np.zeros(N_fine)
xni56_fine = np.zeros(N_fine)
xime_fine = np.zeros(N_fine)
xunbco_fine = np.zeros(N_fine)
for i in range(N_fine):
if mass_fine[i] <= mcoord_ige:
xige_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ige + dmige:
if args.transprof == 'linear':
xige_fine[i] = (mcoord_ige - mass_fine[i]) / dmige + 1.0
elif args.transprof == 'invexpon':
xige_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ige + dmige/2.0))) + 1.0)
elif args.transprof == 'cosine':
xige_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ige) / dmige)) / 2.0
xni56_fine[i] = 1.0 - xige_fine[i]
elif mass_fine[i] < mcoord_ni56:
xni56_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ni56 + dmni56:
if args.transprof == 'linear':
xni56_fine[i] = (mcoord_ni56 - mass_fine[i]) / dmni56 + 1.0
elif args.transprof == 'invexpon':
xni56_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ni56 + dmni56/2.0))) + 1.0)
elif args.transprof == 'cosine':
xni56_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ni56) / dmni56)) / 2.0
xime_fine[i] = 1.0 - xni56_fine[i]
elif mass_fine[i] <= mcoord_ime:
xime_fine[i] = 1.0
elif mass_fine[i] <= mcoord_ime + dmime:
if args.transprof == 'linear':
xime_fine[i] = (mcoord_ime - mass_fine[i]) / dmime + 1.0
elif args.transprof == 'invexpon':
xime_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ime + dmime/2.0))) + 1.0)
elif args.transprof == 'cosine':
xime_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ime) / dmime)) / 2.0
xunbco_fine[i] = 1.0 - xime_fine[i]
else:
xunbco_fine[i] = 1.0
if args.debug:
print(mass_fine[i], xige_fine[i], xni56_fine[i], xime_fine[i], xunbco_fine[i])
# Now map the high resolution grid to the actual grid
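# change_mass_res (assumed to be defined earlier in this file) rebins a
# profile from the fine grid onto the ejecta grid; it is expected to
# conserve species mass, i.e. sum(dm_fine*x_fine) ~ sum(dmass*x)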
xige = change_mass_res(dm_fine, xige_fine, dmass)
xni56 = change_mass_res(dm_fine, xni56_fine, dmass)
xime = change_mass_res(dm_fine, xime_fine, dmass)
xunbco = change_mass_res(dm_fine, xunbco_fine, dmass)
# replace part of 56Ni and IME mass with Ti
xti = (xni56 + xime) * xfracti
xni56 = xni56 * (1.0 - xfracti)
xime = xime * (1.0 - xfracti)
# calculate gamma-ray escape time
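# in homologous expansion the column density scales as t^-2, so
# Sigma(t)*t^2 is constant in time (Sig_tot_t2 is this constant for the
# 56Ni distribution); the escape time t0 follows from
# tau_gamma(t0) = KAPPA_GAMMA * Sigma(t0) = 1, i.e. t0 = sqrt(KAPPA_GAMMA * Sig_tot_t2)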
Sig_tot_t2 = total_column_density_cgs((vel + dvel/2.0)*1e5, dmass*MSUN, xni56)
t0_gamma = np.sqrt(Sig_tot_t2 * KAPPA_GAMMA)
print('')
print('INFO - final ejecta has {:d} zones with Vmax = {:.4e} km/s and'.format(nd, vel.max()))
print('')
print(' Mtot = {:.4e} Msun'.format(np.sum(dmass)))
print(' Ekin = {:.4e} erg'.format(5e9 * np.sum(dmass*MSUN * vel**2))) # 5e9 = 0.5 * 1e10 i.e. 1/2 factor * (km/s->cm/s)^2
print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(np.sum(dmass*xige), (1.0-args.xfracni)*1e2, args.xfracni*1e2))
print(' M(56Ni,t=0) = {:.4e} Msun'.format(np.sum(dmass*xni56)))
sys.stdout.write(' M(IME) = {:.4e} Msun of which'.format(np.sum(dmass*xime)))
for ii, ime in enumerate(imes):
sys.stdout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize()))
if ii == nime-1:
print('')
else:
if ii == nime-2:
sys.stdout.write(' and')
else:
sys.stdout.write(',')
print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(np.sum(dmass*xunbco), (1.0-args.xfraco)*1e2, args.xfraco*1e2))
if (xfracti > 0.0):
print('')
print(' NOTE: M(Ti) = {:.4e} Msun in 56Ni and IME zones'.format(np.sum(dmass*xti)))
print('')
print('INFO - gamma-ray escape time is t0_gamma = {:.2f} days'.format(t0_gamma/DAY2SEC))
#
# account for 56Ni decay between t~0 and T_END
#
decay_const_ni56 = np.log(2) / THALF_56NI / DAY2SEC
decay_const_co56 = np.log(2) / THALF_56CO / DAY2SEC
t1 = np.exp(-decay_const_ni56 * tend_sec)
t2 = np.exp(-decay_const_co56 * tend_sec)
t3 = decay_const_ni56 * (t2-t1) / (decay_const_ni56 - decay_const_co56)
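# t1 = remaining 56Ni fraction; t3 = 56Co fraction from the Bateman solution
# N_Co(t)/N_Ni(0) = lNi/(lCo - lNi) * (exp(-lNi t) - exp(-lCo t));
# the remainder (1 - t1 - t3) has decayed through to 56Fe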
xni56_old = xni56.copy()
xni56 = xni56_old * t1
xco56 = xni56_old * t3 # assumes X(56Co)=0 at t=0
xfe56 = xni56_old * (1.0-t1-t3) # assumes X(56Co)=X(56Fe from 56Ni decay)=0 at t=0
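# by construction t1 + t3 + (1 - t1 - t3) = 1, so the total mass in the
# 56Ni -> 56Co -> 56Fe chain is conserved (equal atomic masses assumed)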
print('')
print('INFO - accounted for 56Ni decay at t = {:.2f} d:'.format(tend))
print('')
print(' M(56Ni) = {:.4e} Msun'.format(np.sum(dmass*xni56)))
print(' M(56Co) = {:.4e} Msun'.format(np.sum(dmass*xco56)))
print(' M(56Fe) = {:.4e} Msun'.format(np.sum(dmass*xfe56)))
#
# set individual IGE abundances
#
xni_stable = xige * args.xfracni
xfe_stable = xige * (1.0 - args.xfracni)
xni = xni_stable + xni56
xco = xco56.copy()
xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay
#
# set individual IME abundances (Mg, Si, S, Ca)
#
# initialize individual IME mass fractions
ximeindiv = {} # dictionary containing IME name and associated mass fraction array, e.g. ximeindiv['si']
for ime in IME_INIT.split(','):
ximeindiv[ime] = np.zeros(nd)
# set individual IME mass fractions
for ii, ime in enumerate(imes):
ximeindiv[ime] = xfracime[ii] * xime
#
# set unburnt C/O abundances
#
xo = xunbco * args.xfraco
xc = xunbco * (1.0 - args.xfraco)
#
# check mass fraction normalization
# (we don't include xti in xtot since Ti simply replaces some 56Ni + IMEs)
#
xtot = xni + xco + xfe + xo + xc
for ime in imes:
xtot += ximeindiv[ime]
for i in range(nd):
t1 = 1.0 - 1.0/xtot[i]
if np.abs(t1) > 1e-3:
if not args.nowarn:
print('WARNING - mass fraction not normalized at depth {:d}: (1 - 1/Xtot) = {:.4e}'.format(i, t1))
# set minimum mass fraction here (after normalization check!)
if args.minxfrac is not None:
if args.minxfrac > MAXMINXFRAC:
sys.exit("ERROR - cannot set minxfrac > {:.4e}: {:.4e}".format(MAXMINXFRAC, args.minxfrac))
print('')
print('INFO - will impose a minimum mass fraction of {:.4e} (apart from 56Ni/Co/Fe!)'.format(args.minxfrac))
### IGEs
if np.sum(xni_stable) > 0.0:
xni_stable[np.where(xni_stable < args.minxfrac)] = args.minxfrac
xni = xni_stable + xni56
if np.sum(xfe_stable) > 0.0:
xfe_stable[np.where(xfe_stable < args.minxfrac)] = args.minxfrac
xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay
xige = xni_stable + xfe_stable
### Titanium
if np.sum(xti) > 0.0:
xti[np.where(xti < args.minxfrac)] = args.minxfrac